From 0c462224378757d4860c9d4d3d6906f9cf8a6118 Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Wed, 12 Dec 2018 13:03:45 -0500
Subject: [PATCH] books/bookvolbib add references
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Goal: Proving Axiom Sane
\index{Murray, Toby}
\index{van Oorschot, P.C.}
\begin{chunk}{axiom.bib}
@misc{Murr18,
author = "Murray, Toby and van Oorcchot, P.C.",
title = {{BP: Formal Proofs, the Fine Print and Side Effects}},
link = "\url{https://people.eng.unimelb.edu.au/tobym/papers/secdev2018.pdf}",
comment = "Version: 26 June 2018 to appear in IEEE SecDev 2018",
abstract =
"Given recent highprofile successes in formal verification of
securityrelated properties (e.g. for seL4), and the rising
popularity of applying formal methods to cryptographic libraries
and security protocols like TLS, we revisit the meaning of
securityrelated proofs about software. We reexamine old issues,
and identify new questions that have escaped scrutiny in the
formal methods literature. We consider what value proofs about
software systems deliver to endusers (e.g. in terms of net
assurance benefits), and at what cost in terms of side effects
(such as changes made to software to facilitate the proofs, and
assumptionrelated deployment restrictions imposed on software if
these proofs are to remain valid in operation). We consider in
detail, for the first time to our knowledge, possible
relationships between proofs and side effects. To make our
discussion concrete, we draw on tangible examples, experience, and
the literature.",
paper = "Murr18.pdf",
keywords = "printed"
}
\end{chunk}
\index{Filliatre, Jean-Christophe}
\begin{chunk}{axiom.bib}
@misc{Fill13a,
author = "Filliatre, JeanChristophe",
title = {{Deductive Program Verification}},
year = "2013",
comment = "slides"
}
\end{chunk}
\index{Fredrikson, Matt}
\begin{chunk}{axiom.bib}
@misc{Fred16,
author = "Fredrikson, Matt",
title = {{Automated Program Verification and Testing}},
comment = "slides",
year = "2016"
}
\end{chunk}
\index{Clochard, Martin}
\index{Gondelman, Leon}
\index{Pereira, Mario}
\begin{chunk}{axiom.bib}
@article{Cloc18,
author = "Clochard, Martin and Gondelman, Leon and Pereira, Mario",
title = {{The Matrix Reproved}},
journal = "Journal of Automated Reasoning",
volume = "60",
number = "3",
pages = "365383",
year = "2018",
link = "\url{https://hal.inria.fr/hal01617437/document}",
abstract =
"In this paper we describe a complete solution for the
first challenge of the VerifyThis 2016 competition held at the 18th
ETAPS Forum. We present the proof of two variants for the
multiplication of matrices: a naive version using three nested loops
and Strassen's algorithm. The proofs are conducted using the Why3
platform for deductive program verification and automated theorem
provers to discharge proof obligations. In order to specify and
prove the two multiplication algorithms, we develop a new Why3
theory of matrices. In order to prove the matrix identities on which
Strassen's algorithm is based, we apply the proof by reflection
methodology, which we implement using ghost state. To our knowledge,
this is the first time such a methodology is used under an
auto-active setting.",
paper = "Cloc18.pdf",
keywords = "printed"
}
\end{chunk}
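For reference, the matrix identities behind Strassen's algorithm that
such a proof must establish are the standard $2\times 2$ block
equations (textbook form, not quoted from the paper):
$$\begin{array}{ll}
M_1 = (A_{11}+A_{22})(B_{11}+B_{22}) & M_2 = (A_{21}+A_{22})B_{11}\\
M_3 = A_{11}(B_{12}-B_{22}) & M_4 = A_{22}(B_{21}-B_{11})\\
M_5 = (A_{11}+A_{12})B_{22} & M_6 = (A_{21}-A_{11})(B_{11}+B_{12})\\
M_7 = (A_{12}-A_{22})(B_{21}+B_{22}) & \\
C_{11} = M_1+M_4-M_5+M_7 & C_{12} = M_3+M_5\\
C_{21} = M_2+M_4 & C_{22} = M_1-M_2+M_3+M_6
\end{array}$$
Seven block products replace the eight of the naive method, giving the
$O(n^{\log_2 7})$ bound.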
\index{Balzer, Stephanie}
\index{Pfenning, Frank}
\index{Toninho, Bernardo}
\begin{chunk}{axiom.bib}
@article{Balz18,
author = "Balzer, Stephanie and Pfenning, Frank and Toninho, Bernardo",
title = {{A Universal Session Type for Untyped Asynchronous Communication}},
journal = "CONCUR",
volume = "30",
number = "1",
year = "2018",
link = "\url{https://www.cs.cmu.edu/~fp/papers/univtype18.pdf}",
abstract =
"In the simplytyped $\lambda$calculus we can recover the full
range of expressiveness of the untyped $\lambda$calculus solely
by adding a single recursive type
$\mathscr{U} = \mathscr{U}\rightarrow \mathscr{U}$. In contract,
in the sessiontyped $\Pi$calculus, recursion alone is
insufficient to recover the untyped $\Pi$calculus, primarily due
to linearity: each channel just has two unique endpoints. In this
paper, we show that shared channels with a corresponding sharing
semantics (base on the the language SILL$_s$ developed in prior
work) are enough to embed the untyped asynchronous $\Pi$calculus
via a universal shared session type $\mathscr{U}_s$. We show that
our encoding of the asynchronous $\Pi$calculus satisfies
operational correspondence and preserves observable actions (i.e.,
processes are weakly bisimilar to their encoding). Moreover, we
clarify the expressiveness of SILL$_s$ by developing an
operationally correct encoding of SILL$_s$ in the asynchronous
$\Pi$calculus.",
paper = "Balz18.pdf",
keywords = "printed"
}
\end{chunk}
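The abstract's opening remark refers to the standard recursive-type
embedding of the untyped $\lambda$-calculus; as a reminder (textbook
construction, not taken from the paper), with coercions
$\mathsf{fold} : (\mathscr{U}\rightarrow\mathscr{U})\rightarrow\mathscr{U}$
and $\mathsf{unfold} : \mathscr{U}\rightarrow(\mathscr{U}\rightarrow\mathscr{U})$
the translation is
$$[\![x]\!] = x \qquad
[\![\lambda x.e]\!] = \mathsf{fold}\,(\lambda x{:}\mathscr{U}.[\![e]\!]) \qquad
[\![e_1\,e_2]\!] = (\mathsf{unfold}\,[\![e_1]\!])\,[\![e_2]\!]$$
so every untyped term receives the single type $\mathscr{U}$; the paper
plays the analogous game for shared session types with $\mathscr{U}_s$.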
\index{Balzer, Stephanie}
\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@inproceedings{Balz17,
author = "Balzer, Stephanie and Pfenning, Frank",
title = {{Manifest Sharing with Session Types}},
booktitle = "Proc. ACM Program. Lang.",
publisher = "ACM",
year = "2017"
link = "\url{https://www.cs.cmu.edu/~fp/papers/icfp17.pdf}",
abstract =
"Sessiontyped languages building on the CurryHoward isomorphism
between linear logic and sessiontyped communication guarantee session
fidelity and deadlock freedom. Unfortunately, these strong guarantees
exclude many naturally occurring programming patterns pertaining to
shared resources. In this paper, we introduce sharing into a
sessiontyped language where types are stratified into linear and
shared layers with modal operators connecting the layers. The
resulting language retains session fidelity but not the absence of
deadlocks, which can arise from contention for shared processes. We
illustrate our language on various examples, such as the dining
philosophers problem, and provide a translation of the untyped
asynchronous $\Pi$calculus into our language.",
paper = "Balz17.pdf",
keywords = "printed"
}
\end{chunk}
\begin{chunk}{axiom.bib}
@article{SCSCP10,
author = "Unknown",
title = {{Symbolic Computation Software Composability Project and
its implementations}},
journal = "ACM Comm. in Computer Algebra",
volume = "44",
number = "4",
year = "2010",
keywords = "printed"
}
\end{chunk}
\index{Bohrer, Brandon}
\index{Crary, Karl}
\begin{chunk}{axiom.bib}
@article{Bohr17,
author = "Bohrer, Brandon and Crary, Karl",
title = {{TWAM: A Certifying Abstract Machine for Logic Programs}},
journal = "ACM Trans. on Computational Logic",
volume = "1",
number = "1",
year = "2017",
link = "\url{https://arxiv.org/pdf/1801.00471.pdf}",
abstract =
"Typepreserving (or typed) compilation uses typing derivations to
certify correctness properties of compilation. We have designed and
implemented a typepreserving compiler for a simplytyped dialect of
Prolog we call TProlog. The crux of our approach is a new certifying
abstract machine which we call the Typed Warren Abstract Machine
(TWAM). The TWAM has a dependent type system strong enough to specify
the semantics of a logic program in the logical framework LF. We
present a soundness metatheorem which constitutes a partial
correctness guarantee: welltyped programs implement the logic
program specified by their type. This metatheorem justifies our design
and implementation of a certifying compiler from TProlog to TWAM.",
paper = "Bohr17.pdf",
keywords = "printed"
}
\end{chunk}
\index{Dershowitz, Nachum}
\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@article{Ders08,
author = "Dershowitz, Nachum and Gurevich, Yuri",
title = {{A Natural Axiomatization of Computability and Proof of
Church's Thesis}},
journal = "Bulletin of Symbolic Logic",
volume = "14",
number = "3",
year = "2008",
abstract =
"Church’s Thesis asserts that the only numeric functions that can be
calculated by effective means are the recursive ones, which are the
same, extensionally, as the Turing computable numeric functions. The
Abstract State Machine Theorem states that every classical algorithm
is behaviorally equivalent to an abstract state machine. This theorem
presupposes three natural postulates about algorithmic
computation. Here, we show that augmenting those postulates with an
additional requirement regarding basic operations gives a natural
axiomatization of computability and a proof of Church’s Thesis, as
Gödel and others suggested may be possible. In a similar way, but with
a different set of basic operations, one can prove Turing’s Thesis,
characterizing the effective string functions, and—in particular—the
effectively-computable functions on string representations of numbers.",
paper = "Ders08.pdf",
keywords = "printed"
}
\end{chunk}
\index{Boas, Peter van Emde}
\begin{chunk}{axiom.bib}
@article{Boas12,
author = "Boas, Peter van Emde",
title = {{Turing Machines for Dummies}},
journal = "LNCS",
volume = "7147",
pages = "14-30",
year = "2012",
abstract =
"Various methods exists in the litearture for denoting the con
figuration of a Turing Machine. A key difference is whether the head
position is indicated by some integer (mathematical representation) or
is specified by writing the machine state next to the scanned tape
symbol (intrinsic representation).
From a mathematical perspective this will make no difference. How
ever, since Turing Machines are primarily used for proving undecidability
and/or hardness results these representations do matter. Based on
a number of applications we show that the intrinsic representation
should be preferred."
paper = "Boas12.pdf",
keywords = "printed"
}
\end{chunk}
\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@article{Gure12,
author = "Gurevich, Yuri",
title = {{What Is An Algorithm?}},
journal = "LNCS",
volume = "7147",
pages = "3142",
year = "2012",
abstract =
"We attempt to put the title problem and the ChurchTuring thesis into
a proper perspective and to clarify some common misconcep tions
related to Turing’s analysis of computation. We examine two approaches
to the title problem, one wellknown among philosophers and another
among logicians.",
paper = "Gure12.pdf",
keywords = "printed"
}
\end{chunk}
\index{Moschovakis, Y.N.}
\begin{chunk}{axiom.bib}
@article{Mosc84,
author = "Moschovakis, Y.N.",
title = {{Abstract Recursion as a Foundation of the Theory of Algorithms}},
journal = "LNCS",
volume = "1104",
pages = "289364",
publisher = "Springer",
year = "1984",
abstract =
"The main object of this paper is to describe an abstract,
(axiomatic) theory of recursion and its connection with some of
the basic, foundational questions of computer science.",
paper = "Mosc84.pdf",
keywords = "printed"
}
\end{chunk}
\index{Pittman, Dan}
\begin{chunk}{axiom.bib}
@misc{Pitt18,
author = "Pittman, Dan",
title = {{Proof Theory Impressionism: Blurring the Curry-Howard Line}},
link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}",
year = "2018",
comment = "Strange Loop 2018 Conference"
}
\end{chunk}
\index{Polikarpova, Nadia}
\begin{chunk}{axiom.bib}
@misc{Poli18,
author = "Polikarpova, Nadia",
title = {{Type-Driven Program Synthesis}},
link = "\url{https://www.youtube.com/watch?v=HnOix9TFy1A}",
year = "2018",
abstract =
"A promising approach to improving software quality is to enhance
programming languages with declarative constructs, such as logical
specifications or examples of desired behavior, and to use program
synthesis technology to translate these constructs into efficiently
executable code. In order to produce code that provably satisfies a
rich specification, the synthesizer needs an advanced program
verification mechanism that is sufficiently expressive and fully
automatic. In this talk, I will present a program synthesis technique
based on refinement type checking: a verification mechanism that
supports automatic reasoning about expressive properties through a
combination of types and SMT solving.
The talk will present two applications of type-driven synthesis. The
first one is a tool called Synquid, which creates recursive functional
programs from scratch given a refinement type as input. Synquid is the
first synthesizer powerful enough to automatically discover programs
that manipulate complex data structures, such as balanced trees and
propositional formulas. The second application is a language called
Lifty, which uses type-driven synthesis to repair information flow
leaks. In Lifty, the programmer specifies expressive information flow
policies by annotating the sources of sensitive data with refinement
types, and the compiler automatically inserts access checks necessary
to enforce these policies across the code."
}
\end{chunk}
\index{van der Walt, Paul}
\begin{chunk}{axiom.bib}
@mastersthesis{Walt12,
author = "van der Walt, Paul",
title = {{Reflection in Agda}},
school = "Utrecht University",
year = "2012",
abstract =
"This project explores the recent addition to Agda enabling
reflection, in the style of Lisp, MetaML, and Template Haskell. It
illustrates several possible applications of reflection that arise in
dependently typed programming, and details the limitations of the
current implementation of reflection. Examples of type-safe
metaprograms are given that illustrate the power of reflection coupled
with a dependently typed language. Among other things the limitations
inherent in having quote and unquote implemented as keywords are
highlighted. The fact that lambda terms are returned without typing
information is discussed, and a solution is presented. Also provided
is a detailed users’ guide to the reflection API and a library of
working code examples to illustrate how various common tasks can be
performed, along with suggestions for an updated reflection API in a
future version of Agda.",
paper = "Walk12.pdf",
keywords = "printed"
}
\end{chunk}
\index{Christiansen, David Thrane}
\begin{chunk}{axiom.bib}
@misc{Chri18,
author = "Christiansen, David Thrane",
title = {{A Little Taste of Dependent Types}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=VxINoKFmS4}",
abstract =
"Dependent types let us use the same programming language for
compiletime and runtime code, and are inching their way towards the
mainstream from research languages like Coq, Agda and Idris. Dependent
types are useful for programming, but they also unite programming and
mathematical proofs, allowing us to use the tools and techniques we
know from programming to do math.
The essential beauty of dependent types can sometimes be hard to find
under layers of powerful automatic tools. The Little Typer is an
upcoming book on dependent types in the tradition of The Little
Schemer that features a tiny dependently typed language called Pie. We
will demonstrate a proof in Pie that is also a program."
}
\end{chunk}
\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@article{Gure18,
author = "Gurevich, Yuri",
title = {{Church-Turing Thesis Cannot Possibly Be True}},
year = "2018",
link =
"\url{https://www.microsoft.com/enus/research/video/churchturingthesiscannotpossiblybetrue/}",
abstract =
"The thesis asserts this: If an algorithm A computes a partial
function f from natural numbers to natural numbers then f is partially
recursive, i.e., the graph of f is recursively enumerable.
The thesis was formulated in the 1930s. The only algorithms at the
time were sequential algorithms. Sequential algorithms were
axiomatized in 2000. This axiomatization was used in 2008 to prove the
thesis for sequential algorithms, i.e., for the case where A ranges
over sequential algorithms.
These days, in addition to sequential algorithms, there are parallel
algorithms, distributed algorithms, probabilistic algorithms, quantum
algorithms, learning algorithms, etc.
The question whether the thesis is true in full generality has been
actively discussed since the 1960s. We argue that, in full generality,
the thesis cannot possibly be true."
}
\end{chunk}
\index{Ellis, Ferris}
\begin{chunk}{axiom.bib}
@misc{Elli18,
author = "Ellis, Ferris",
title = {{Strength in Numbers: Unums and the Quest for Reliable
Arithmetics}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=nVNYjmj_qbY}",
abstract =
"In the land of computer arithmetic, a tyrant has ruled since its very
beginning: the floating point number. Under its rule we have all
endured countless hardships and cruelties. To this very day the
floating point number still denies 0.1 + 0.2 == 0.3 and returns
insidious infinities to software developers everywhere. But a new hero
has entered the fray: the universal number (unum). Can it topple the
float number system and its century-long reign?
This talk will introduce unums, explain their benefits over floating
point numbers, and examine multiple real world examples comparing the
two. For those not familiar with floating point numbers and their
pitfalls, this talk also includes a primer on the topic. Code examples
are in Rust, though strong knowledge of the language is not needed."
}
\end{chunk}
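The 0.1 + 0.2 example from the abstract is easy to reproduce; a quick
illustration in Common Lisp (any language with IEEE-754 doubles behaves
the same way):
\begin{verbatim}
;; 0.1, 0.2 and 0.3 have no exact binary representation, so the
;; sum carries a rounding error:
(+ 0.1d0 0.2d0)            ; => 0.30000000000000004d0
(= (+ 0.1d0 0.2d0) 0.3d0)  ; => NIL
;; the usual float workaround is comparison within a tolerance:
(< (abs (- (+ 0.1d0 0.2d0) 0.3d0)) 1d-12)  ; => T
\end{verbatim}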
\index{Weirich, Stephanie}
\begin{chunk}{axiom.bib}
@misc{Weir18,
author = "Weirich, Stephanie",
title = {{Dependent Types in Haskell}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=wNa3MMbhwS4}",
abstract =
"What has dependent type theory done for Haskell? Over the past ten
years, the Glasgow Haskell compiler (GHC) has adopted many type system
features inspired by dependent type theory. In this talk, I will
discuss the influence of dependent types on the design of GHC and on
the practice of Haskell programmers. In particular, I will walk
through an extended example and use it to analyze what it means to
program with dependent types in Haskell. Throughout, I will
discuss what we have learned from this experiment in language design:
what works now, what doesn't work yet, and what surprised us along
the way."
}
\end{chunk}
\index{Christiansen, David Thrane}
\begin{chunk}{axiom.bib}
@misc{Chri18a,
author = "Christiansen, David Thrane",
title = {{Coding for Types: The Universe Pattern in Idris}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=AWeT_G04a0A}"
}
\end{chunk}
\index{Altenkirch, Thorsten}
\begin{chunk}{axiom.bib}
@misc{Alte18,
author = "Altenkirch, Thorsten",
title = {{Naive Type Theory}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=bNG53SA4n48p}"
}
\end{chunk}
\index{Hemann, Jason}
\index{Friedman, Daniel}
\begin{chunk}{axiom.bib}
@misc{Hema14,
author = "Hemann, Jason and Friedman, Daniel",
title = {{Write the Other Half of Your Program}},
year = "2014",
link = "\url{https://www.youtube.com/watch?v=RG9fBbQrVOM}"
}
\end{chunk}
\index{McKenna, Brian}
\begin{chunk}{axiom.bib}
@misc{Mcke14,
author = "McKenna, Brian",
title = {{Idris: Practical Dependent Types with Practical Examples}},
year = "2014",
link = "\url{https://www.youtube.com/watch?v=4i7KrG1Afbk}"
abstract =
"Dependent types turn types into a firstclass language construct and
allows types to be predicated upon values. Allowing types to be
controlled by ordinary values allows us to prove many more properties
about our programs and enables some interesting forms of
metaprogramming. We can do interesting things like write a typesafe
printf or verify algebraic laws of data structures  but while
dependent types are quickly gaining in popularity, it can be tricky to
find examples which are both useful and introductory.
This talk will demonstrate some practical dependent typing examples
(and not sized vectors) using Idris, a language designed for writing
executable programs, rather than just proving theorems."
}
\end{chunk}
\index{Brady, Edwin}
\begin{chunk}{axiom.bib}
@misc{Brad17,
author = "Brady, Edwin",
title = {{Dependent Types in the Idris Programming Language 1}},
year = "2017",
link = "\url{https://www.cs.uoregon.edu/research/summerschool/summer17/topics.php}"
}
\end{chunk}
\index{Brady, Edwin C.}
\begin{chunk}{axiom.bib}
@phdthesis{Brad05,
author = "Brady, Edwin C.",
title = {{Practical Implementation of a Dependently Typed Functional
Programming Language}},
school = "University of Durham",
year = "2005",
link = "\url{https://eb.host.cs.standrews.ac.uk/writings/thesis.pdf}",
abstract =
"Types express a program’s meaning, and checking types ensures that a
program has the intended meaning. In a dependently typed programming
language types are predicated on values, leading to the possibility of
expressing invariants of a program’s behaviour in its type. Dependent
types allow us to give more detailed meanings to programs, and hence
be more confident of their correctness.
This thesis considers the practical implementation of a dependently
typed programming language, using the Epigram notation defined by
McBride and McKinna. Epigram is a high level notation for dependently
typed functional programming elaborating to a core type theory based
on Luo’s UTT, using Dybjer’s inductive families and elimination rules
to implement pattern matching. This gives us a rich framework for
reasoning about programs. However, a naïve implementation introduces
several runtime overheads since the type system blurs the
distinction between types and values; these overheads include the
duplication of values, and the storage of redundant information and
explicit proofs.
A practical implementation of any programming language should be as
efficient as possible; in this thesis we see how the apparent
efficiency problems of dependently typed programming can be overcome
and that in many cases the richer type information allows us to apply
optimisations which are not directly available in traditional
languages. I introduce three storage optimisations on inductive
families; forcing, detagging and collapsing. I further introduce a
compilation scheme from the core type theory to G-machine code,
including a pattern matching compiler for elimination rules and a
compilation scheme for efficient run-time implementation of Peano’s
natural numbers. We also see some low level optimisations for removal
of identity functions, unused arguments and impossible case branches.
As a result, we see that a dependent type theory is an effective base
on which to build a feasible programming language.",
paper = "Brad05.pdf"
}
\end{chunk}
\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@inbook{Soar99,
author = "Soare, Robert I.",
title = {{The History and Concept of Computability}},
booktitle = "Handbook of Computability Theory",
publisher = "NorthHolland",
link = "\url{http://www.people.cs.uchicago.edu/~soare/History/handbook.pdf}",
year = "1999",
paper = "Soar99.pdf",
keywords = "printed"
}
\end{chunk}
\index{Post, Emil L.}
\begin{chunk}{axiom.bib}
@article{Post44,
author = "Post, Emil L.",
title = {{Recursively Enumerable Sets of Positive Integers and Their
Decision Problems}},
journal = "Bull. Amer. Math. Soc.",
volume = "50",
pages = "284-316",
year = "1944",
paper = "Post44.pdf",
keywords = "printed"
}
\end{chunk}
\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@inproceedings{Soar07,
author = "Soare, Robert I.",
title = {{Computability and Incomputability}},
booktitle = "Proc. Third Conf. on Computability in Europe}},
year = "2007",
publisher = "SpringerVerlog",
isbn = "139783540730002",
link = "\url{http://www.people.cs.uchicago.edu/~soare/History/siena.pdf}",
abstract =
"The conventional wisdom presented in most computability books and
historical papers is that there were several researchers in the early
1930’s working on various precise definitions and demonstrations of a
function specified by a finite procedure and that they should all
share approximately equal credit. This is incorrect. It was Turing
alone who achieved the characterization, in the opinion of Gödel. We
also explore Turing’s oracle machine and its analogous properties in
analysis.",
paper = "Soar07.pdf",
keywords = "printed"
}
\end{chunk}
\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@article{Soar96,
author = "Soare, Robert I.",
title = {{Computability and Recursion}},
journal = "Bulletin of Symbolic Logic",
volume = "2",
year = "1996",
pages = "284321",
link = "\url{http://www.people.cs.uchicago.edu/~soare/History/compute.pdf}",
abstract =
"We consider the informal concept of 'computability' or 'effective
calculability' and two of the formalisms commonly used to define
it, '(Turing) computability' and '(general) recursiveness'. We
consider their origin, exact technical definition, concepts,
history, general English meanings, how they became fixed in their
present roles, how they were first and are now used, their impact
on non-specialists, how their use will affect the future content of
the subject of computability theory, and its connection to other
related areas.
After a careful historical and conceptual analysis of
computability and recursion we make several recommendations in
section 7 about preserving the {\sl intensional} differences
between the concepts of 'computability' and
'recursion'. Specifically we recommend that: the term 'recursive'
should no longer carry the additional meaning of 'computable' or
'decidable'; functions defined using Turing machines, register
machines, or their variants should be called 'computable' rather
than 'recursive'; we should distinguish the intensional difference
between Church's Thesis and Turing's Thesis, and use the latter
particularly in dealing with mechanistic questions; and the name of
the subject should be '{\sl Computability Theory}' or simply
{\sl Computability} rather than 'Recursive Function Theory'.",
paper = "Soar96.pdf",
keywords = "printed"
}
\end{chunk}
\index{van Heijenoort, Jean}
\begin{chunk}{axiom.bib}
@book{Heij67,
author = "van Heijenoort, Jean",
title = {{From Frege to Gödel}},
publisher = "Harvard University Press",
isbn = "0674324498",
year = "1967"
}
\end{chunk}
\index{Boolos, George S.}
\index{Burgess, John P.}
\index{Jeffrey, Richard C.}
\begin{chunk}{axiom.bib}
@book{Bool07,
author = "Boolos, George S. and Burgess, John P. and Jeffrey, Richard C.",
title = {{Computability and Logic}},
publisher = "Cambridge University Press",
year = "2007",
isbn = "9780521701464"
}
\end{chunk}
\index{Copeland, B. Jack}
\begin{chunk}{axiom.bib}
@book{Cope04,
author = "Copeland, B. Jack",
title = {{The Essential Turing}},
publisher = "Oxford University Press",
year = "2004",
isbn = "9780198250807"
}
\end{chunk}
\index{Barwise, Jon}
\index{Moss, Lawrence}
\begin{chunk}{axiom.bib}
@book{Barw96,
author = "Barwise, Jon and Moss, Lawrence",
title = {{Vicious Circles}},
publisher = "CSLI Publications",
year = "1996",
isbn = "1575860082"
}
\end{chunk}
\index{Harper, Robert}
\begin{chunk}{axiom.bib}
@book{Harp16,
author = "Harper, Robert",
title = {{Practical Foundations for Programming Languages}},
publisher = "Cambridge University Press",
year = "2016",
isbn = "9781107150300"
}
\end{chunk}
\index{Hilbert, David}
\index{Cohn-Vossen, S.}
\begin{chunk}{axiom.bib}
@book{Hilb52,
author = "Hilbert, David and CohnVossen, S.",
title = {{Geometry and the Imagination}},
publisher = "Chelsea Publishing Company",
year = "1952"
}
\end{chunk}
\index{Heyting, A.}
\begin{chunk}{axiom.bib}
@book{Heyt56,
author = "Heyting, A.",
title = {{Intuitionism: An Introduction}},
publisher = "North Holland",
year = "1956"
}
\end{chunk}
\index{Jeffrey, Richard}
\begin{chunk}{axiom.bib}
@book{Jeff81,
author = "Jeffrey, Richard",
title = {{Formal Logic: Its Scope and Limits}},
publisher = "McGrawHill",
year = "1981",
isbn = "0070323216"
}
\end{chunk}
\index{Cosmo, Roberto Di}
\begin{chunk}{axiom.bib}
@book{Cosm95,
author = "Cosmo, Roberto Di",
title = {{Isomorphisms of Types}},
publisher = "Birkhauser",
year = "1995",
isbn = "081763763X"
}
\end{chunk}
\index{Winkler, Franz}
\begin{chunk}{axiom.bib}
@book{Wink84,
author = "Winkler, Franz",
title = {{The Church-Rosser Property in Computer Algebra and Special
Theorem Proving}},
publisher = "Wien",
year = "1984",
isbn = "3853695841"
}
\end{chunk}
\index{Mills, Bruce}
\begin{chunk}{axiom.bib}
@book{Mill06,
author = "Mills, Bruce",
title = {{Theoretical Introduction to Programming}},
publisher = "Springer",
year = "2006",
isbn = "1846280214"
}
\end{chunk}
\index{Platzer, Andre}
\begin{chunk}{axiom.bib}
@book{Plat18,
author = "Platzer, Andre",
title = {{Logical Foundations of Cyber-Physical Systems}},
publisher = "Springer",
year = "2018",
isbn = "9783319635873"
}
\end{chunk}
\index{Yoshida, Masaaki}
\begin{chunk}{axiom.bib}
@book{Yosh97,
author = "Yoshida, Masaaki",
title = {{Hypergeometric Functions, My Love}},
publisher = "Springer",
year = "1997",
isbn = "9783322901682"
}
\end{chunk}
\index{Dijkstra, Edsger W.}
\index{Scholten, Carel S.}
\begin{chunk}{axiom.bib}
@book{Dijk90,
author = "Dijkstra, Edsger W. and Scholten, Carel S.",
title = {{Predicate Calculus and Program Semantics}},
publisher = "Springer",
year = "1990",
isbn = "0387969578"
}
\end{chunk}
\index{Ehrig, Hartmut}
\index{Mahr, Bernd}
\begin{chunk}{axiom.bib}
@book{Ehri85a,
author = "Ehrig, Hartmut and Mahr, Bernd",
title = {{Fundamentals of Algebraic Specification 2: Module
Specifications and Constraints}},
publisher = "Springer Verlag",
year = "1985",
isbn = "0387517995"
}
\end{chunk}
\index{Boyer, Robert S.}
\index{Moore, J Strother}
\begin{chunk}{axiom.bib}
@book{Boye81a,
author = "Boyer, Robert S. and Moore, J Strother",
title = {{The Correctness Problem in Computer Science}},
publisher = "Academic Press",
year = "1981",
isbn = "0121229203"
}
\end{chunk}
\index{Dijkstra, Edsger W.}
\index{Feijen, W.H.J.}
\begin{chunk}{axiom.bib}
@book{Dijk88,
author = "Dijkstra, Edsger W. and Feijen, W.H.J",
title = {{A Method of Programming}},
publisher = "Addison Wesley",
year = "1988",
isbn = "0201175363"
}
\end{chunk}
\index{Kiczales, Gregor}
\index{des Rivieres, Jim}
\index{Bobrow, Daniel G.}
\begin{chunk}{axiom.bib}
@book{Kicz91,
author = "Kiczales, Gregor and des Rivieres, Jim and Bobrow, Daniel G.",
title = {{The Art of the Metaobject Protocol}},
publisher = "MIT Press",
year = "1991",
isbn = "0262610744"
}
\end{chunk}
\index{Herken, Rolf}
\begin{chunk}{axiom.bib}
@book{Herk88,
author = "Herken, Rolf",
title = {{The Universal Turing Machine}},
publisher = "Oxford Science Publications",
year = "1988",
isbn = "0198537743"
}
\end{chunk}
\index{Kripke, Saul A.}
\begin{chunk}{axiom.bib}
@book{Krip80,
author = "Kripke, Saul A.",
title = {{Naming and Necessity}},
publisher = "Harvard University Press",
year = "1980",
isbn = "0674598466"
}
\end{chunk}
\index{Kleene, S.C.}
\index{Vesley, R.E.}
\begin{chunk}{axiom.bib}
@book{Klee65,
author = "Kleene, S.C. and Vesley, R.E.",
title = {{The Foundations of Intuitionistic Mathematics}},
publisher = "NorthHolland Publishing Company",
year = "1965"
}
\end{chunk}
\index{Dijkstra, Edsger W.}
\begin{chunk}{axiom.bib}
@misc{Dijk70,
author = "Dijkstra, Edsger W.",
title = {{Concern for Correctness as a Guiding Principle for Program
Composition}},
year = "1970",
comment = "EWD288"
link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD02xx/EWD288.html}",
paper = "Dijk70.html",
keywords = "printed"
}
\end{chunk}
\index{Hahnle, Reiner}
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@misc{Hahn04,
author = "Hahnle, Reiner and Wallenburg, Angela",
title = {{Using a Software Testing Technique to Improve Theorem Proving}},
year = "2004",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
comment = "paper follows thesis in file",
abstract =
"Most efforts to combine formal methods and software testing go in the
direction of exploiting formal methods to solve testing problems, most
commonly test case generation. Here we take the reverse viewpoint and
show how the technique of partition testing can be used to improve a
formal proof technique (induction for correctness of loops). We first
compute a partition of the domain of the induction variable, based on
the branch predicates in the program code of the loop we wish to
prove. Based on this partition we derive mechanically a partitioned
induction rule, which is (hopefully) easier to use than the standard
induction rule. In particular, with an induction rule that is
tailored to the program to be verified, less user interaction can be
expected to be required in the proof. We demonstrate with a number of
examples the effectiveness of our method.",
paper = "Wall04.pdf",
keywords = "printed"
}
\end{chunk}
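To make the idea concrete, here is an illustrative sketch (not the rule
as derived in the paper): where standard Peano induction has a single
step case, a partition $\{n < k,\ n \geq k\}$ read off a branch
predicate splits it into one obligation per cell:
$$\frac{P(0)\quad \forall n.\,P(n)\rightarrow P(n+1)}{\forall n.\,P(n)}
\qquad\leadsto\qquad
\frac{P(0)\quad \forall n.\,n<k \wedge P(n)\rightarrow P(n+1)\quad
\forall n.\,n\geq k \wedge P(n)\rightarrow P(n+1)}{\forall n.\,P(n)}$$
The two step cases jointly cover all $n$, so the partitioned rule is
sound, and each case can be discharged under its branch assumption.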
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@phdthesis{Wall04,
author = "Wallenburg, Angela",
title = {{Inductive Rules for Proving Correctness of Imperative Programs}},
school = "Goteborg University",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
abstract =
"This thesis is aimed at simplifying the userinteraction in
semiinteractive theorem proving for imperative programs. More
specically , we describe the creation of customised induction rules
that are tailormade for the specific program to verify and thus make
the resulting proof simpler. The concern is in user interaction,
rather than in proof strength. To achiev e this, two different
verication techniques are used.
In the first approach, we develop an idea where a software testing
technique, partition analysis, is used to compute a partition of the
domain of the induction variable, based on the branch predicates in
the program we wish to prove correct. Based on this partition we
derive mechanically a partitioned induction rule, which then inherits
the divide-and-conquer style of partition analysis, and (hopefully) is
easier to use than the standard (Peano) induction rule.
The second part of the thesis continues with a more thorough
development of the method. Here the connection to software testing is
completely removed and the focus is on inductive theorem proving only.
This time, we make use of failed proof attempts in a theorem prover
to gain information about the problem structure and create the
partition. Then, based on the partition we create an induction rule,
in destructor style, that is customised to make the proving of the
loop simpler.
With the customised induction rules, in comparison to standard (Peano)
induction or Noetherian induction, the required user interaction is
moved to an earlier point in the proof which also becomes more
modularised. Moreover, by using destructor style induction we
circumvent the problem of creating inverses of functions. The
soundness of the customised induction rules created by the method is
shown. Furthermore, the machinery of the theorem prover (KeY) is used
to make the method automatic. The induction rules are developed to
prove the total correctness of loops in an object-oriented language
and we concentrate on integers.",
paper = "Wall04.pdf",
keywords = "printed"
}
\end{chunk}
\index{Olsson, Ola}
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@misc{Olss04,
author = "Olsson, Ola and Wallenburg, Angela",
title = {{Automatic Generation of Customised Induction Rules for
Proving Correctness of Imperative Programs}},
year = "2004",
comment = "paper follows thesis in file",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
abstract =
"In this paper we develop a method for automatic construction of
customised induction rules for use in a semiinteractiv e theorem
prover. The induction rules are developed to prove the total
correctness of loops in an objectoriented language. We concentrate
on integers. First we compute a partition of the domain of the
induction variable. Our method makes use of failed proof attempts in
the theorem prover to gain information about the problem structure and
create the partition. Then, based on this partition we create an
induction rule, in destructor style, that is customised to make the
proving of the loop simpler. Our concern is in user interaction,
rather than in proof strength.
Using the customised induction rules, some separation of proof of
control flow and data correctness is achieved, and we find that in
comparison to standard (Peano) induction or Noetherian induction,
simpler user interaction can be expected. Furthermore, by using
destructor style induction we circumvent the problem of creating
inverses of functions. We show the soundness of the customised
induction rules created by the method. Furthermore, we use the
machinery of the theorem prover (KeY) to make the method automatic.
Several interesting areas are also identified that could open up for
a larger range of loops the method can handle, as well as pointing
towards full automation of these cases.",
paper = "Wall04.pdf",
keywords = "printed"
}
\end{chunk}
\index{Olsson, Ola}
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@misc{Olss05,
author = "Olsson, Ola and Wallenburg, Angela",
title = {{Customised Induction Rules for Proving Correctness of
Imperative Programs}},
year = "2005",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/olswbg05.pdf}",
abstract =
"In this paper we develop a method for automatic construction of
customised induction rules for use in a semiinteractiv e theorem
prover. The induction rules are developed to prove the total
correctness of loops in an objectoriented language. We concentrate
on integers. First we compute a partition of the domain of the
induction variable. Our method makes use of failed proof attempts in
the theorem prover to gain information about the problem structure and
create the partition. Then, based on this partition we create an
induction rule, in destructor style, that is customised to make the
proving of the loop simpler. Our concern is in user interaction,
rather than in proof strength.
Using the customised induction rules, some separation of proof of
control flow and data correctness is achieved, and we find that in
comparison to standard (Peano) induction or Noetherian induction,
simpler user interaction can be expected. Furthermore, by using
destructor style induction we circumvent the problem of creating
inverses of functions. We show the soundness of the customised
induction rules created by the method. Furthermore, we use the
machinery of the theorem prover (KeY) to make the method automatic.
Several interesting areas are also identified that could open up for
a larger range of loops the method can handle, as well as pointing
towards full automation of these cases.",
paper = "Olss05.pdf",
keywords = "printed"
}
\end{chunk}
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@misc{Wall09,
author = "Wallenburg, Angela",
title = {{Generalisation of Inductive Formulae based on Proving by
Symbolic Execution}},
year = "2009",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/awWING2009.pdf}",
abstract =
"Induction is a powerful method that can be used to prove the total
correctness of program loops. Unfortunately the induction proving
process in an interactive theorem prover is often very cumbersome. In
particular it can be difficult to find the right induction formula.
We describe a method for generalising induction formulae by analysing
a symbolic proof attempt in a semi-interactive first-order theorem
prover. Based on the proof attempt we introduce universally
quantified variables, metavariables and sets of constraints on these.
The constraints describe the conditions for a successful proof. By
the help of examples, we outline some classes of problems and their
associated constraint solutions, and possible ways to automate the
constraint solving.",
paper = "Wall09.pdf",
keywords = "printed"
}
\end{chunk}
\index{Moy, Yannick}
\index{Wallenburg, Angela}
\begin{chunk}{axiom.bib}
@misc{Moyx10,
author = "Moy, Yannick and Wallenburg, Angela",
title = {{Tokeneer: Beyond Formal Program Verification}},
year = "2010",
link = "\url{http://www.cse.chalmers.se/~angelaw/papers/ERTS2010.pdf}",
abstract =
"Tokeneer is a smallsized (10 kloc) security system which was
formally developed and verified by Praxis at the request of NSA, using
SPARK technology. Since its opensource release in 2008, only two
problems were found, one by static analysis, one by code review. In
this paper, we report on experiments where we systematically applied
various static analysis tools (compiler, bugfinder, proof tools)
and focused code reviews to all of the SPARK code (formally verified)
and supporting Ada code (not formally verified) of the Tokeneer
Project. We found 20 new problems overall, half of which are defects
that could lead to a system failure should the system be used in its
current state. Only two defects were found in SPARK code, which
confirms the benefits of applying formal verification to reach
higher levels of assurance. In order to leverage these benefits to
code that is was not formally verified from the start, we propose to
associate static analyses and dynamic analyses around a common
expression of properties and constraints. This is the goal of starting
project HiLite, which involves AdaCore and Altran Praxis together
with several industrial users and research labs.",
paper = "Moyx10.pdf",
keywords = "printed"
}
\end{chunk}
\index{Baker, Henry G.}
\begin{chunk}{axiom.bib}
@article{Bake92,
author = "Baker, Henry G.",
title = {{A Decision Procedure for Common Lisp's SUBTYPEP Predicate}},
journal = "Lisp and Symbolic Computation",
volume = "5",
number = "3",
year = "1992",
pages = "157190"
link = "\url{}",
abstract =
"Common Lisp [CL84,CL90] includes a dynamic datatype system of
moderate complexity, as well as predicates for checking the types of
language objects. Additionally, an interesting predicate of two 'type
specifiers'— SUBTYPEP —is included in the language. This subtypep
predicate provides a mechanism with which to query the Common Lisp
type system regarding containment relations among the various built-in
and user-defined types. While subtypep is rarely needed by an
applications programmer, the efficiency of a Common Lisp
implementation can depend critically upon the quality of its subtypep
predicate: the runtime system typically calls upon subtypep to decide
what sort of representations to use when making arrays; the compiler
calls upon subtypep to interpret user declarations, on which efficient
data representation and code generation decisions are based.
As might be expected due to the complexity of the Common Lisp type
system, there may be type containment questions which cannot be
decided. In these cases subtypep is expected to return 'can't
determine', in order to avoid giving an incorrect
answer. Unfortunately, most Common Lisp implementations have abused
this license by answering 'can't determine' in all but the most
trivial cases. In particular, most Common Lisp implementations of
SUBTYPEP fail on the basic axioms of the Common Lisp type system
itself [CL84,p.33]. This situation is particularly embarrassing for
Lisp—the premier 'symbol processing language'—in which the
implementation of complex symbolic logical operations should be
relatively easy. Since subtypep was presumably included in Common Lisp
to answer the hard cases of type containment, this 'lazy evaluation'
limits the usefulness of an important language feature.
This paper shows how those type containment relations of Common Lisp
which can be decided at all, can be decided simply and quickly by a
decision procedure which can dramatically reduce the number of
occurrences of the 'can't determine' answer from subtypep. This
decision procedure does not require the conversion of a type specifier
expression to conjunctive or disjunctive normal form, and therefore
does not incur the exponential explosion in space and time that such a
conversion would entail.
The lattice mechanism described here for deciding subtypep is also
ideal for performing type inference [Baker90]; the particular
implementation developed here, however, is specific to the type system
of Common Lisp [Beer88].",
paper = "Bake92.pdf",
keywords = "printed"
}
\end{chunk}
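For readers who have not met the predicate, SUBTYPEP returns two
values, subtype-p and certain-p; a few illustrative calls (results for
SATISFIES types vary by implementation):
\begin{verbatim}
;; SUBTYPEP returns two values: is-subtype and is-certain.
(subtypep '(integer 0 10) 'number)  ; => T,   T  (certainly a subtype)
(subtypep 'number '(integer 0 10))  ; => NIL, T  (certainly not)
;; SATISFIES types are undecidable in general, so an implementation
;; may give up and answer "can't determine":
(subtypep '(satisfies oddp) 'integer)  ; => NIL, NIL (typically)
\end{verbatim}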
\index{Haynes, Christopher T.}
\index{Friedman, Daniel P.}
\begin{chunk}{axiom.bib}
@techreport{Hayn87,
author = "Haynes, Christopher T. and Friedman, Daniel P."
title = {{Embedding Continuations in Procedural Objects}},
type = "technical report",
institution = "Indiana University",
number = "213",
year = "1987",
abstract =
"Continuations, when available as firstclass objects, provide a
general control abstraction in programming languages. They
liberate the programmer from specific control structures,
increasing programming language extensibility. Such continuations
may be extended by embedding them in procedural objects. This
technique is first used to restore a fluid environment when a
continuation object is invoked. We then consider techniques for
constraining the power of continuations in the interest of
security and efficiency. Domain mechanisms, which create dynamic
barriers for enclosing control, are implemented using
fluids. Domains are then used to implement an unwindprotect
facility in the presence of firstclass continuations. Finally, we
present two mechanisms, windunwind and dynamicwind, that
generalizes unwindprotect.",
paper = "Hayn87.pdf",
keywords = "printed"
}
\end{chunk}
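As background, the Common Lisp facility that the paper generalizes to a
setting with first-class continuations looks like this (plain CL, not
the paper's Scheme machinery):
\begin{verbatim}
;; UNWIND-PROTECT runs its cleanup forms however control leaves the
;; protected form: normal return, THROW, or a signalled error.
(defun first-line (path)
  (let ((stream (open path)))
    (unwind-protect
        (read-line stream)
      (close stream))))   ; cleanup always executed
\end{verbatim}
With first-class continuations a protected region can be exited and
later re-entered, which is why the more general wind-unwind and
dynamic-wind mechanisms are needed.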
\index{Goldblatt, Robert}
\begin{chunk}{axiom.bib}
@book{Gold84,
author = "Goldblatt, Robert",
title = {{Topoi: The Categorical Analysis of Logic}},
year = "1984",
publisher = "Dover",
isbn = "9780486450261"
}
\end{chunk}
\index{Clocksin, William F.}
\begin{chunk}{axiom.bib}
@book{Cloc91,
author = "Clocksin, William F.",
title = {{Clause and Effect}},
year = "1991",
publiser = "Springer",
isbn = "3540629718"
}
\end{chunk}
\index{Brooks, Rodney A.}
\index{Gabriel, Richard P.}
\index{Steele, Guy L.}
\begin{chunk}{axiom.bib}
@article{Broo82a,
author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
title = {{An Optimizing Compiler for Lexically Scoped LISP}},
journal = "ACM SIGPLAN Notices",
volume = "17",
number = "6",
year = "1982",
pages = "261275',
abstract =
"We are developing an optimizing compiler for a dialect of the
LISP language. The current target architecture is the S1, a
multiprocessing supercomputer designed at Lawrence Livermore
National Laboraty. While LISP is usually thought of as a language
primarily for symbolic processing and list manipulation, this
compiler is also intended to compete with the S1 PASCAL and
FORTRAN compilers for quality of compiled numerical code. The S1
is designed for extremely highspeed signal processing as well as
for symbolic computation; it provides primitive operations on
vectors of floatingpoint and complex numbers. The LISP compiler
is designed to exploit the architecture heavily.",
The compiler is structurally and conceptually similar to the
BLISS11 compiler and the compilers produced by PQCC. In
particular, the TNBIND technique has been borrowed and extended.
Particularly interesting properties of the compiler are:
\begin{itemize}
\item Extensive use of sourcetosource transformation
\item Use of an intermediate form that is expressionoriented
rather than statement oriented
\item Exploitation of tailrecursive function calls to represent
coplex control structures
\item Efficient compilation of code that can manipulate procedural
object that require heapallocated environments
\item Smooth runtime interfacing between the ``numerical world''
and ``LISP pointer world'', including automatic stack allocation
of objects that ordinarlly must be heapallocated
\end{itemize}
Each of these techniques has been used before, but we believe
their synthesis to be original and unique.
The compiler is tabledriven to a great extent, more so than
BLISS11 but less so than a PQCC compiler. We expect to be able to
redirect the compiler to other target architectures such as the
VAX or PDP10 with relatively little effort.",
paper = "Broo82a.pdf",
keywords = "printed"
}
\end{chunk}
\index{Brooks, Rodney A.}
\index{Gabriel, Richard P.}
\index{Steele, Guy L.}
\begin{chunk}{axiom.bib}
@inproceedings{Broo82,
author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
title = {{S-1 Common Lisp Implementation}},
booktitle = "Proc. ACM Symp. on Lisp and Functional Programming",
publisher = "ACM",
year = "1982",
isbn = "0897910826",
abstract =
"We are developing a Lisp implementation for the Lawrence
Livermore National Laboratory S-1 Mark IIA computer. The dialect
of Lisp is an extension of COMMON Lisp [Steele;1982], a descendant
of Maclisp [Moon;1974] and Lisp Machine Lisp [Weinreb;1981].",
paper = "Broo82.pdf",
keywords = "printed"
}
\end{chunk}
\index{Beer, Randall D.}
\begin{chunk}{axiom.bib}
@article{Beer87,
author = "Beer, Randall D.",
title = {{Preliminary Report on A Practical Type Inference System
for Common Lisp}},
journal = "Lisp Pointers",
volume = "1",
number = "2",
year = "1987",
pages = "511",
paper = "Beer87.pdf",
keywords = "printed"
}
\end{chunk}
\index{Brooks, Rodney A.}
\index{Gabriel, Richard P.}
\begin{chunk}{axiom.bib}
@inproceedings{Broo84,
author = "Brooks, Rodney A. and Gabriel, Richard P.",
title = {{A Critique of Common Lisp}},
booktitle = "Symposium on Lisp and Functional Programming",
pages = "18",
year = "1984",
abstract =
"A major goal of the COMMON LISP committee was to define a Lisp
language with sufficient power and generality that people would be
happy to stay within its confines and thus write inherently
transportable code. We argue that the resulting language
definition is too large for many short-term and medium-term
potential applications. In addition many parts of COMMON LISP
cannot be implemented very efficiently on stock hardware. We
further argue that the very generality of the design with its
different efficiency profiles on different architectures works
against the goal of transportability.",
paper = "Broo84.pdf",
keywords = "printed"
}
\end{chunk}
\index{Cousot, Patrick}
\index{Cousot, Radhia}
\begin{chunk}{axiom.bib}
@inproceedings{Cous77,
author = "Cousot, Patrick and Cousot, Radhia",
title = {{Abstract Interpretation: A Unified Lattice Model for
Static Analysis of Programs by Construction or
Approximation of Fixpoints}},
booktitle = "Symp. on Principles of Programming Languages",
pages = "238252",
year = "1977",
paper = "Cous77.pdf",
keywords = "printed"
}
\end{chunk}
\index{Rees, Jonathan}
\begin{chunk}{axiom.bib}
@article{Rees82,
author = "Rees, Jonathan",
title = {{T: A Dialect of Lisp or, LAMBDA: The Ultimate Software Tool}},
year = "1982",
abstract =
"The T proejct is an experiment in language design and
implementation. Its purpose is to test the thesis developed by
Steele and Sussman in their series of papers about the Scheme
language: that Scheme may be used as the basis for a practical
programming language of exceptional expressive power, and that
implementations of Scheme could perform better than other Lisp
systems, and competitively with implementations of programming
languages, such as C and Bliss, which are usually considered to be
inherently more efficient than Lisp on conventional machine
architectures. We are developing a portable implementation of T,
currently targeted for the VAX under the Unix and VMS operating
systems and for the Apollo, a MC68000based workstation.",
paper = "Rees82.pdf",
keywords = "printed"
}
\end{chunk}
\index{Shivers, Olin}
\begin{chunk}{axiom.bib}
@techreport{Shiv90,
author = "Shivers, Olin",
title = {{Data-Flow Analysis and Type Recovery in Scheme}},
year = "1990",
type = "technical report",
institution = "Carnegie Mellon University",
number = "CMUCS90115",
abstract =
"The lack of explicit type information in Scheme prevents the
application of many compiler optimizations. Implicit type
information can oftentimes be recovered by analyzing the flow of
control through primitive operations and conditionals. Such flow
analysis, however, is difficult in the presence of higher-order
functions. This paper presents a technique for recovering type
information from Scheme programs using flow analysis. The
algorithm used for flow analysis is semantically based, and is
novel in that it correctly handles the ``environment flow''
problem. Several examples of a working implementation of the
analysis are presented. The techniques are applicable not only to
Scheme type recovery, but also to related languages, such as
Common Lisp and Standard ML, and to related dataflow analyses,
such as range analysis, future analysis, and copy propagation.",
paper = "Shiv90.pdf",
keywords = "printed"
}
\end{chunk}
\index{Steenkiste, Peter}
\index{Hennessy, John}
\begin{chunk}{axiom.bib}
@article{Stee87,
author = "Steenkiste, Peter and Hennessy, John",
title = {{Tags and Type Checking in LISP: Hardware and Software Approaches}},
journal = "ACM SIGPLAN Notices",
volume = "22",
number = "10",
pages = "5059",
year = "1987",
abstract =
"One of the major factors that distinguishes LISP from many other
languages (Pascal, C, Fortran, etc.) is the need for runtime type
checking. Runtime type checking is implemented by adding to each
data object a tag that encodes type information. Tags must be
compared for type compatibility, removed when using the data, and
inserted when new data items are created. This tag manipulation,
together with other work related to dynamic type checking and
generic operations, constitutes a significant component of the
execution time of LISP programs. This has led both to the
development of LISP machines that support tag checking in hardware
and to the avoidance of type checking by users running on stock
hardware. To understand the role and necessity of special-purpose
hardware for tag handling, we first measure the cost of type
checking operations for a group of LISP programs. We then examine
hardware and software implementations of tag operations and
estimate the cost of tag handling with the different tag
implementation schemes. The data shows that minimal levels of
support provide most of the benefits, and that tag operations can
be relatively inexpensive, even when no special hardware support
is present.",
paper = "Stee87.pdf",
keywords = "printed"
}
\end{chunk}
\index{Brooks, Rodney A.}
\index{Posner, David B.}
\index{McDonald, James L.}
\index{White, Jon L.}
\index{Benson, Eric}
\index{Gabriel, Richard P.}
\begin{chunk}{axiom.bib}
@inproceedings{Broo86,
author = "Brooks, Rodney A. and Posner, David B. and McDonald, James L.
and White, Jon L. and Benson, Eric} and Gabriel, Richard P.",
title = {{Design of An Optimising, Dynamically Retargetable Compiler
for Common Lisp}},
booktitle = "Conf. on Lisp and Functional Programming",
publisher = "ACM",
pages = "6785",
year = "1986",
abstract =
"We outline the components of a retargetable crosscompiler for
the Common Lisp language. A description is given of a method for
modeling the various hardware features in the compiler's database,
and a breakdown is shown of the compiler itself into various
machineindependent and machinedependent modules. A novel feature
of this development is the dynamic nature of the retargeting:
Databses for multiple hardware architectures are a standard part
of the compiler, and the internal interfaces used by the compiler
are such that the machinedependent modules may be instantly
switched from one to another. Examples of generated code in
several environments will be given to demonstrate the high quality
of the output available, even under this very modular approach..",
paper = "Broo86.pdf",
keywords = "printed"
}
\end{chunk}
\index{Kaplan, Marc A.}
\index{Ullman, Jeffrey D.}
\begin{chunk}{axiom.bib}
@article{Kapl80,
author = "Kaplan, Marc A. and Ullman, Jeffrey D.",
title = {{A Scheme for the Automatic Inference of Variable Types}},
journal = "J. ACM",
volume = "27",
number = "1",
pages = "128.145",
year = "1980",
abstract =
"In this paper an algorithm for the determination of runtime
types in a programming language requiring no type declarations is
presented. It is demonstrated that this algorithm is superior to
other published algorithms in the sense that it produces stronger
assertions about the set of possible types for variables than do
other known algorithms. In fact this algorithm is shown to be the
best possible algorithm from among all those that use the same set
of primitive operators.",
paper = "Kapl80.pdf",
keywords = "printed"
}
\end{chunk}
\index{Ma, Kwan-Liu}
\index{Kessler, Robert R.}
\begin{chunk}{axiom.bib}
@article{Maxx90,
author = "Ma, KwanLiu and Kessler, Robert R.",
title = {{TICL  A Type Inference System for Common Lisp}},
journal = "Software  Practice and Experience",
volume = "20",
number = "6",
pages = "593623",
year = "1990",
abstract =
"Most current Common Lisp compilers generate more efficient code
when supplied with data type information. However, in keeping with
standard Lisp programming style, most programmers are reluctant to
provide type information; they simply allow the runtime type
system to managed the data types accordingly. To fill this gap, we
have designed and implemented a type inference system for Common
Lisp (TICL). TICL takes a Lisp program that has been annotated
with a few type declarations, adds as many declarations as
possible, and produces a type declared program. The compiler can
then use this information to generate more efficient
code. Measurements indicate that a 20 per cent speed improvement
can generally be achieved.",
paper = "Maxx90.pdf",
keywords = "printed"
}
\end{chunk}
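The declarations TICL aims to infer look like the hand-written ones
below (a generic illustration, not TICL output):
\begin{verbatim}
;; With these declarations a CL compiler can use unboxed machine
;; arithmetic instead of generic, tag-checked operations.
(defun dot (a b)
  (declare (type (simple-array double-float (*)) a b)
           (optimize (speed 3) (safety 0)))
  (let ((s 0d0))
    (declare (type double-float s))
    (dotimes (i (length a) s)
      (incf s (* (aref a i) (aref b i))))))
\end{verbatim}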
\index{Henglein, Friedrich}
\begin{chunk}{axiom.bib}
@phdthesis{Heng89,
author = "Henglein, Friedrich",
title = {{Polymorphic Type Inference and Semi-Unification}},
school = "Rutgers",
year = "1989",
link =
"\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.388.1275&rep=rep1&type=pdf}",
abstract =
"In the last ten years declarationfree programming languages with
a polymorphic typing discipline (ML, B) have been developed to
approximate the flexibility and conciseness of dynamically typed
languages (LISP, SETL) while retaining the safety and execution
efficiency of conventional statically typed languages (Algol68,
Pascal). These polymorphic languages can be type checked at
compile time, yet allow functions whose arguments range over a
variety of types.
We investigate several polymorphic type systems, the most powerful
of which, termed the Milner-Mycroft Calculus, extends the so-called
let-polymorphism found in, e.g., ML with a polymorphic typing rule
for recursive definitions. We show that semi-unification, the
problem of solving inequalities over first-order terms,
characterizes type checking in the Milner-Mycroft Calculus up to
polynomial time, even in the restricted case where nested
definitions are disallowed. This permits us to extend some
infeasibility results for related combinatorial problems to type
inference and to correct several claims and statements in the
literature.
We prove the existence of unique most general solutions of term
inequalities, called most general semi-unifiers, and present an
algorithm for computing them that terminates for all known inputs
due to a novel ``extended occurs check''. We conjecture this
algorithm to be uniformly terminating even though, at present,
general semi-unification is not known to be decidable. We prove
termination of our algorithm for a restricted case of
semi-unification that is of independent interest.
Finally, we offer an explanation for the apparent practicality of
polymorphic type inference in the face of theoretical
intractability results.",
paper = "Heng89.pdf",
}
\end{chunk}
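
For readers unfamiliar with the problem, a worked example (ours): a
substitution $\sigma$ semi-unifies a term inequality $t \leq u$ when
$\rho(\sigma(t)) = \sigma(u)$ for some residual substitution $\rho$.
For the single inequality
\[ g(x,x) \;\leq\; g(y,z) \]
the identity substitution fails, since no $\rho$ can send $x$ to both
$y$ and $z$; any semi-unifier must identify $y$ and $z$. The most
general semi-unifier is $\sigma = \{z \mapsto y\}$ with residual
$\rho = \{x \mapsto y\}$, giving
$\rho(\sigma(g(x,x))) = g(y,y) = \sigma(g(y,z))$.
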
\index{Nimmer, Jeremy W.}
\index{Ernst, Michael D.}
\begin{chunk}{axiom.bib}
@inproceedings{Nimm02,
author = "Nimmer, Jeremy W. and Ernst, Michael D.",
title = {{Automatic Generation of Program Specifications}},
booktitle = "Symp. on Software Testing and Analysis",
publisher = "ACM Press",
year = "2002",
abstract =
"Producing specifications by dynamic (runtime) analysis of program
executions is potentially unsound, because the analyzed executions may
not fully characterize all possible executions of the program. In
practice, how accurate are the results of a dynamic analysis? This
paper describes the results of an investigation into this question,
determining how much specifications generalized from program runs
must be changed in order to be verified by a static checker.
Surprisingly, small test suites captured nearly all program behavior
required by a specific type of static checking; the static checker
guaranteed that the implementations satisfy the generated
specifications, and ensured the absence of runtime exceptions. Measured
against this verification task, the generated specifications scored
over 90 percent on precision, a measure of soundness, and on recall, a
measure of completeness.
This is a positive result for testing, because it suggests that
dynamic analyses can capture all semantic information of interest for
certain applications. The experimental results demonstrate that a
specific technique, dynamic invariant detection, is effective at
generating consistent, sufficient specifications for use by a static
checker. Finally, the research shows that combining static and
dynamic analyses over program specifications has benefits for users of
each technique, guaranteeing soundness of the dynamic analysis and
lessening the annotation burden for users of the static analysis.",
paper = "Nimm02.pdf",
keywords = "printed"
}
\end{chunk}
\index{Hantler, Sidney L.}
\index{King, James C.}
\begin{chunk}{axiom.bib}
@article{Hant76,
author = "Hantler, Sidney L. King, James C.",
title = {{An Introduction to Proving the Correctness of Programs}},
journal = "ACM Computing Surveys",
volume = "8",
number = "3",
pages = "331353",
year = "1976",
abstract =
"This paper explains, in an introductory fashion, the method of
specifying the correct behavior of a program by the use of
input/output assertions and describes one method for showing that the
program is correct with respect to those assertions. An initial
assertion characterizes conditions expected to be true upon entry to
the program and a final assertion characterizes conditions expected to
be true upon exit from the program. When a program contains no
branches, a technique known as symbolic execution can be used to show
that the truth of the initial assertion upon entry guarantees the
truth of the final assertion upon exit. More generally, for a program
with branches one can define a symbolic execution tree. If there is
an upper bound on the number of times each loop in such a program may
be executed, a proof of correctness can be given by a simple traversal
of the (finite) symbolic execution tree.
However, for most programs, no fixed bound on the number of times each
loop is executed exists and the corresponding symbolic execution trees
are infinite. In order to prove the correctness of such programs, a
more general assertion structure must be provided. The symbolic
execution tree of such programs must be traversed inductively rather
than explicitly. This leads naturally to the use of additional
assertions which are called ``inductive assertions.''",
paper = "Hant76.pdf",
keywords = "printed",
}
\end{chunk}
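
A one-line instance of the method (example ours): for the
straight-line program
\[ \{x \geq 0\} \quad y := x + 1;\; y := y \ast y \quad \{y \geq 1\} \]
symbolic execution binds $y$ first to $x+1$ and then to $(x+1)^2$, so
the verification condition is $x \geq 0 \Rightarrow (x+1)^2 \geq 1$,
which clearly holds.
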
\index{Wirth, Niklaus}
\begin{chunk}{axiom.bib}
@misc{Wirt15,
author = "Wirth, Niklaus",
title = {{The Design of a RISC Architecture and its Implementation
with an FPGA}},
link =
"\url{https://www.inf.ethz.ch/personal/wirth/FPGArelatedWork/RISC.pdf}",
year = "2015",
paper = "Wirt15.pdf",
keywords = "printed"
}
\end{chunk}
\index{Remy, Didier}
\begin{chunk}{axiom.bib}
@techreport{Remy92,
author = "Remy, Didier",
title = {{Extensions to ML type system with a sorted equation theory
on types}},
type = "research report",
institution = "INRIA",
number = "RR1766",
year = "1992",
abstract =
"We extend the ML language by allowing a sorted regular equational
theory on types for which unification is decidable and unitary. We
prove that the extension keeps principal typings and subject
reduction. A new set of typing rules is proposed so that type
generalization is simpler and more efficient. We consider typing
problems as general unification problems, which we solve with a
formalism of unificands. Unificands naturally deal with sharing
between types and lead to a more efficient type inference
algorithm. The use of unificands also simplifies the proof of
correctness of the algorithm by splitting it into more elementary
steps.",
paper = "Remy92.pdf",
keywords = "printed"
}
\end{chunk}
\index{Atkey, Robert}
\begin{chunk}{axiom.bib}
@inproceedings{Atke18,
author = "Atkey, Robert",
title = {{Syntax and Semantics of Quantitative Type Theory}},
booktitle = "LICS",
year = "2018",
publisher = "ACM",
isbn = "9781450355834",
abstract =
"We present Quantitative Type Theory, a Type Theory that records
usage information for each variable in a judgement, based on a
previous system by McBride. The usage information is used to give
a realizability semantics using a variant of Linear Combinatory
Algebras, refining the usual realizability semantics of Type
Theory by accurately tracking resource behaviour. We define the
semantics in terms of Quantitative Categories with Families, a
novel extension of Categories with Families for modelling resource
sensitive type theories.",
paper = "Atke18.pdf",
keywords = "printed"
}
\end{chunk}
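
A sketch of the judgement form (our rendering, with the usual
$\{0,1,\omega\}$ semiring): each context variable carries a usage
annotation,
\[
  x_1 \stackrel{\rho_1}{:} A_1, \ldots, x_n \stackrel{\rho_n}{:} A_n
  \;\vdash\; t \stackrel{\sigma}{:} B,
  \qquad \rho_i \in \{0,1,\omega\},\; \sigma \in \{0,1\}.
\]
The linear identity is typed $x \stackrel{1}{:} A \vdash x
\stackrel{1}{:} A$, while a variable used only in a type can be
annotated $0$ and erased at runtime.
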
\index{Gabriel, Richard P.}
\begin{chunk}{axiom.bib}
@misc{Gabr10,
author = "Gabriel, Richard P.",
title = {{A Review of The Art of the Metaobject Protocol}},
year = "2010",
paper = "Gabr10.pdf",
keywords = "printed"
}
\end{chunk}
\index{Church, Alonzo}
\begin{chunk}{axiom.bib}
@book{Chur41,
author = "Church, Alonzo",
title = {{The Calculi of Lambda Conversion}},
year = "1941",
publisher = "Princeton University Press",
paper = "Alon41*.pdf"
}
\end{chunk}
\index{Chow, Timothy Y.}
\begin{chunk}{axiom.bib}
@misc{Chow18,
author = "Chow, Timothy Y.",
title = {{The Consistency of Arithmetic}},
year = "2018",
link = "\url{http://timothychow.net/consistent.pdf}",
paper = "Chow18.pdf",
keywords = "printed"
}
\end{chunk}
\index{Torlak, Emina}
\begin{chunk}{axiom.bib}
@misc{Torl17,
author = "Torlak, Emina",
title = {{Synthesis and Verification for All}},
link = "\url{https://www.youtube.com/watch?v=KpDyuMIb_E0}",
year = "2017"
}
\end{chunk}
\index{Burstall, R.M.}
\begin{chunk}{axiom.bib}
@article{Burs69,
author = "Burstall, R.M.",
title = {{Proving Properties of Programs by Structural Induction}},
journal = "Computer Journal",
volume = "12",
number = "1",
pages = "4148",
link = "\url{http://www.cse.chalmers.se/edu/year/2010/course/DAT140_Types/Burstall.pdf}",
year = "1969",
abstract =
"This paper discusses the technique of structural induction for
proving theorems about programs. This technique is closely related to
recursion induction but makes use of the inductive definition of the
data structures handled by the programs. It treats programs with
recursion but without assignments or jumps. Some syntactic extensions
to Landin's functional programming language ISWIM are suggested which
make it easier to program the manipulation of data structures and to
develop proofs about such programs. Two sample proofs are given to
demonstrate the technique, one for a tree sorting algorithm and one
for a simple compiler for expressions.",
paper = "Burs69.pdf",
keywords = "printed"
}
\end{chunk}
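
The induction principle at work, stated for lists (our summary): to
prove $P$ of every list built from $\mathit{nil}$ and $\mathit{cons}$,
prove $P$ at $\mathit{nil}$ and show that $\mathit{cons}$ preserves it:
\[
  \frac{P(\mathit{nil}) \qquad
        \forall x\,\forall l.\; P(l) \Rightarrow P(\mathit{cons}(x,l))}
       {\forall l.\; P(l)}
\]
A sorting proof, for example, instantiates $P(l)$ as ``the result of
sorting $l$ is an ordered permutation of $l$''.
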
\index{Reynolds, John C.}
\begin{chunk}{axiom.bib}
@inbook{Reyn97,
author = "Reynolds, John C.",
title = {{The Essence of Algol}},
booktitle = "ALGOLlike languages, Volume 1",
publisher = "Birkhauser Boston Inc.",
isbn = "0817638806",
year = "1997",
abstract =
"Although Algol 60 has been uniquely in uential in programming
language design, its descendants have been signicantly dierent than
their prototype. In this paper, we enumerate the principles that we
believe embody the essence of Algol, describe a model that satises
these principles, and illustrate this model with a language that,
while more uniform and general, retains the character of Algol.",
paper = "Reyn97.pdf",
keywords = "printed"
}
\end{chunk}
\index{Sannella, Donald}
\begin{chunk}{axiom.bib}
@article{Sann86,
author = "Sannella, Donald",
title = {{Extended ML: An InstitutionIndependent Framework for
Formal Program Development}},
booktitle = "Proc. Workshop on Category Theory and Computer Programming",
journal = "LNCS",
volume = "240",
pages = "364389",
year = "1986",
paper = "Sann86.pdf"
}
\end{chunk}
\index{Sannella, Donald}
\index{Tarlecki, Andrzej}
\begin{chunk}{axiom.bib}
@article{Sann87,
author = "Sannella, Donald and Tarlecki, Andrzej",
title = {{On Observational Equivalence and Algebraic Specification}},
journal = "J. of Computer and System Sciences",
volume = "34",
pages = "150178",
year = "1987"
abstract =
"The properties of a simple and natural notion of observational
equivalence of algebras and the corresponding specificationbuilding
operation are studied. We begin with a definition of observational
equivalence which is adequate to handle reachable algebras only, and
show how to extend it to cope with unreachable algebras and also how
it may be generalised to make sense under an arbitrary institution.
Behavioural equivalence is treated as an important special case of
observational equivalence, and its central role in program development
is shown by means of an example.",
paper = "Sann87.pdf"
}
\end{chunk}
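
The notion at stake, in one line (our gloss): two $\Sigma$-algebras
$A$ and $B$ are observationally equivalent with respect to a set
$\mathit{OBS}$ of observable sorts when every ground term of
observable sort evaluates the same way in both:
\[
  A \equiv_{\mathit{OBS}} B \iff
  \forall t.\; \mathrm{sort}(t) \in \mathit{OBS} \Rightarrow t^A = t^B.
\]
For instance, a stack in which $\mathit{pop}(\mathit{push}(s,x)) = s$
holds literally and one in which the difference is never visible in
any $\mathit{nat}$-valued observation are equivalent when
$\mathit{OBS} = \{\mathit{nat}\}$.
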
\index{Sannella, Donald}
\index{Tarlecki, Andrzej}
\begin{chunk}{axiom.bib}
@article{Sann88,
author = "Sannella, Donald and Tarlecki, Andrzej",
title = {{Specifications in an Arbitrary Institution}},
journal = "Information and Computation",
volume = "76",
pages = "165210",
year = "1988",
abstract =
"A formalism for constructing and using axiomatic specifications in an
arbitrary logical system is presented. This builds on the framework
provided by Goguen and Burstall’s work on the notion of an institution
as a formalisation of the concept of a logical system for writing
specifications. We show how to introduce free variables into the
sentences of an arbitrary institution and how to add quantifiers which
bind them. We use this foundation to define a set of primitive
operations for building specifications in an arbitrary institution
based loosely on those in the ASL kernel specification language. We
examine the set of operations which results when the definitions are
instantiated in institutions of total and partial first-order logic
and compare these with the operations found in existing specification
languages. We present proof rules which allow proofs to be conducted
in specifications built using the operations we define. Finally, we
introduce a simple mechanism for defining and applying parameterised
specifications and briefly discuss the program development process.",
paper = "Sann88.pdf"
}
\end{chunk}
\index{Sannella, Donald}
\index{Tarlecki, Andrzej}
\begin{chunk}{axiom.bib}
@article{Sann89,
author = "Sannella, Donald and Tarlecki, Andrzej",
title = {{Toward Formal Development of ML Programs: Foundations and
Methodology (Extended Abstract)}},
year = "1989",
abstract =
"A methodology is presented for the formal development of modular
Standard ML programs from specifications. Program development proceeds
via a sequence of design (modular decomposition), coding and
refinement steps. For each of these three kinds of step, conditions
are given which ensure the correctness of the result. These conditions
seem to be as weak as possible under the constraint of being
expressible as local interface matching requirements.",
paper = "Sann89.pdf"
}
\end{chunk}
\begin{chunk}{axiom.bib}
@article{SCSCP10,
author = "Unknown",
title = {{Symbolic Computation Software Composability Project and
its implementations}},
journal = "ACM Comm. in Computer Algebra",
volume = "44",
number = "4",
year = "2010",
keywords = "printed"
}
\end{chunk}
\index{Damas, Luis}
\index{Milner, Robin}
\begin{chunk}{axiom.bib}
@inproceedings{Dama82,
author = "Damas, Luis and Milner, Robin",
title = {{Principal Type-schemes for Functional Programs}},
booktitle = "Proc POPL '82",
year = "1982",
pages = "207212",
paper = "Dama82.pdf",
keywords = "printed"
}
\end{chunk}
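
The paper's central notion in one example (ours): in
\[ \mathbf{let}\ id = \lambda x.\,x\ \mathbf{in}\ id\ id \]
the let-bound $id$ receives the principal type scheme
$\forall \alpha.\, \alpha \rightarrow \alpha$, of which every other
valid type of $\lambda x.\,x$ is a substitution instance; the two uses
of $id$ in the body instantiate $\alpha$ independently, which a
$\lambda$-bound variable could not do.
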
\index{Mobarakeh, S. Saeidi}
\begin{chunk}{axiom.bib}
@misc{Moba09,
author = "Mobarakeh, S. Saeidi",
title = {{Type Inference Algorithms}},
year = "2009",
link = "\url{https://www.win.tue.nl/~hzantema/semssm.pdf}",
abstract =
"n this paper we are going to describe the Wand’s type inference
algorithm and we’ll try to extend this algorithm with the notion of
polymorphic let. By means of a type system, which we’re going to
extend with some constraint language, we are able to extend the
algorithm’s first phase (generation of equations) with
letpolymorphism. The second phase of the algorithm (solving of the
generated equations) needs some modifications to be done on standard
unification algorithms because the new generated equation constructs
can not directly be fed in to the standard unification algorithm. The
correctness of the first phase of the algorithm has been proved by
extending Wand's soundness and completeness proof. Finally a
simple example is given to clarify the idea behind the algorithm.",
paper = "Moba09.pdf",
keywords = "printed"
}
\end{chunk}
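
The first phase of a Wand-style algorithm in miniature (example ours):
assign a type variable to every subterm and read off one equation per
application node. For $\lambda f.\,\lambda x.\, f\,x$, take
$f : \alpha$, $x : \beta$ and $f\,x : \gamma$; the application
generates the single equation
\[ \alpha = \beta \rightarrow \gamma, \]
and solving it gives the whole term the type
$(\beta \rightarrow \gamma) \rightarrow \beta \rightarrow \gamma$.
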
\index{Kreitz, Christoph}
\index{Rahli, Vincent}
\begin{chunk}{axiom.bib}
@techreport{Kreix11,
author = "Kreitz, Christoph and Rahli, Vincent",
title = {{Introduction to Classic ML}},
type = "technical report",
institution = "Cornell University",
year = "2011",
paper = "Krei11.pdf",
keywords = "printed"
}
\end{chunk}
\index{Lee, Daniel K.}
\index{Crary, Karl}
\index{Harper, Robert}
\begin{chunk}{axiom.bib}
@inproceedings{Leex07,
author = "Lee, Daniel K. and Crary, Karl and Harper, Robert",
title = {{Towards a Mechanized Metatheory of Standard ML}},
booktitle = "POPL'07",
publisher = "ACM",
year = "2007",
abstract =
"We present an internal language with equivalent expressive power to
Standard ML, and discuss its formalization in LF and the
machinechecked verification of its type safety in Twelf. The
internal language is intended to serve as the target of elaboration in
an elaborative semantics for Standard ML in the style of Harper and
Stone. Therefore, it includes all the programming mechanisms
necessary to implement Standard ML, including translucent modules,
abstraction, polymorphism, higher kinds, references, exceptions,
recursive types, and recursive functions. Our successful formalization
of the proof involved a careful interplay between the precise
formulations of the various mechanisms, and required the invention of
new representation and proof techniques of general interest.",
paper = "Leex07.pdf",
keywords = "printed"
}
\end{chunk}
\index{Tarditi, David}
\index{Morrisett, Greg}
\index{Cheng, Perry}
\index{Stone, Chris}
\index{Harper, Robert}
\index{Lee, Peter}
\begin{chunk}{axiom.bib}
@inproceedings{Tard03,
author = "Tarditi, David and Morrisett, Greg and Cheng, Perry and
Stone, Chris and Harper, Robert and Lee, Peter",
title = {{TIL: A TypeDirected, Optimizing Compiler for ML}},
booktitle = "20 Years of Prog. Lang. Design and Implementation",
year = "2003",
publisher = "ACM",
isbn = "1581136234",
paper = "Tard03.pdf",
keywords = "printed"
}
\end{chunk}
\index{Herman, David}
\begin{chunk}{axiom.bib}
@inproceedings{Herm07,
author = "Herman, David",
title = {{Functional Pearl: The Great Escape}},
booktitle = "ICFP'07",
publisher = "ACM",
isbn = "1595938152",
year = "2007",
abstract =
"Filinski showed that callcc and a single mutable reference cell
are sufficient to express the delimited control operators shift
and reset. However, this implementation interacts poorly with
dynamic bindings like exception handlers. We present a variation
on Filinski's encoding of delimited continuations that behaves
appropriately in the presence of exceptions and give an
implementation in Standard ML of New Jersey. We prove the encoding
correct with respect to the semantics of delimited dynamic binding.",
paper = "Herm07.pdf"
}
\end{chunk}
\index{Rossberg, Andreas}
\begin{chunk}{axiom.bib}
@misc{Ross13,
author = "Rossberg, Andreas",
title = {{HaMLet: To Be Or Not To Be Standard ML}},
year = "2013",
link = "\url{https://people.mpisws.org/~rossberg/hamlet}",
paper = "Ross13.pdf",
keywords = "printed"
}
\end{chunk}
\index{Kahrs, Stefan}
\index{Sannella, Donald}
\index{Tarlecki, Andrzej}
\begin{chunk}{axiom.bib}
@article{Kahr97,
author = "Kahrs, Stefan and Sannella, Donald and Tarlecki, Andrzej",
title = {{The Definition of Extended ML}},
journal = "Theoretical Computer Science",
volume = "173",
pages = "445484",
year = "1997",
abstract =
"This document formally defines the syntax and semantics of the
Extended ML language. It is based on the published semantics of
Standard ML in an attempt to ensure compatibility between the two
languages.",
paper = "Kahr97.pdf"
}
\end{chunk}
\index{Kahrs, Stefan}
\index{Sannella, Donald}
\begin{chunk}{axiom.bib}
@article{Kahr98,
author = "Kahrs, Stefan and Sannella, Donald",
title = {{Reflections on the Design of a Specification Language}},
journal = "LNCS",
volume = "1382",
pages = "154170",
abstract =
"We reflect on our experiences from work on the design and
semantic underpinnings of Extended ML, a specification language
which supports the specification and formal development of
Standard ML programs. Our aim is to isolate problems and issues
that are intrinsic to the general enterprise of designing a
specification language for use with a given programming language.
Consequently the lessons learned go far beyond our original aim of
designing a specification language for ML.",
paper = "Karh98.pdf",
keywords = "printed"
}
\end{chunk}

 books/bookvolbib.pamphlet      | 2393 +++++++++++++++++++++++++++++++++++++
 changelog                      |    5 +
 patch                          | 2487 +++++++++++++++++++++++++++++++++
 readme                         |    5 +
 src/axiom-website/patches.html |    2 +
 5 files changed, 4449 insertions(+), 443 deletions(-)
diff --git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index 09e432f..f2d9b8c 100644
--- a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ -842,7 +842,7 @@ paragraph for those unfamiliar with the terms.
$\nu$ reduction does not commute with algebraic reduction, in general.
However, using long $\nu$normal forms, we show that if $R$ is canonical
then $R+\beta+type\beta+type\nu$ convertibility is still decidable.",
paper = "Brea89.pdf"
+ paper = "Brea89.pdf"
}
\end{chunk}
@@ -1302,21 +1302,6 @@ paper = "Brea89.pdf"
\subsection{D} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\index{Damas, Luis}
-\index{Milner, Robin}
-\begin{chunk}{axiom.bib}
-@inproceedings{Dama82,
- author = "Damas, Luis and Milner, Robin",
- title = {{Principal Type-schemes for Functional Programs}},
- booktitle = "POPL 82",
- pages = "207-212",
- year = "1982",
- isbn = "0897980656",
- paper = "Dama82.pdf"
-}
-
-\end{chunk}
-
\index{Davis, Martin D.}
\index{Sigal, Ron}
\index{Weyuker, Elaine J.}
@@ -1359,7 +1344,21 @@ paper = "Brea89.pdf"
Initial Semantics}},
publisher = "Springer Verlag",
year = "1985",
 isbn = "9780387137186"
+ isbn = "0387137181"
+}
+
+\end{chunk}
+
+\index{Ehrig, Hartmut}
+\index{Mahr, Bernd}
+\begin{chunk}{axiom.bib}
+@book{Ehri85a,
+ author = "Ehrig, Hartmut and Mahr, Bernd",
+ title = {{Fundamentals of Algebraic Specification 2: Module
+ Specifications and Constraints}},
+ publisher = "Springer Verlag",
+ year = "1985",
+ isbn = "0387517995"
}
\end{chunk}
@@ -1883,7 +1882,8 @@ paper = "Brea89.pdf"
reduced to type checking. The practical benefit of our treatment of
formal systems is that logicindependent tools, such as proof editors
and proof checkers, can be constructed.",
 paper = "Harp93.pdf"
+ paper = "Harp93.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ -2006,7 +2006,7 @@ paper = "Brea89.pdf"
author = "Howard, W.H.",
title = {{The FormulaeasTypes Notion of Construction}},
year = "1969",
 link = "\url{}",
+ link = "\url{http://www.dcc.fc.up.pt/~acm/howard.pdf}",
abstract =
"The following consists of notes which were privately circulated in
1969. Since they have been referred to a few times in the literature,
@@ -2101,7 +2101,7 @@ paper = "Brea89.pdf"
\begin{chunk}{axiom.bib}
@misc{Isab11,
author = "Unknown",
- title = {{Isabell}},
+ title = {{Isabelle}},
link = "\url{http://www.cl.cam.ac.uk/research/hvg/Isabelle/index.html}",
year = "2011"
}
@@ -2735,7 +2735,8 @@ paper = "Brea89.pdf"
publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
link = "\url{http://smlfamily.org/sml90defn.pdf}",
year = "1990",
 paper = "Miln90.pdf"
+ paper = "Miln90.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ -9580,7 +9581,7 @@ when shown in factored form.
link = "\url{http://www.cecm.sfu.ca/personal/mmonagan/papers/AFGCD.pdf}",
abstract =
"Let $L$ be an algebraic function field in $k \ge 0$ parameters
- $t_1,\ldots,t)k$. Let $f_1$, $f_2$ be nonzero polynomials in
+ $t_1,\ldots,t_k$. Let $f_1$, $f_2$ be nonzero polynomials in
$L[x]$. We give two algorithms for computing their gcd. The first, a
modular GCD algorithm, is an extension of the modular GCD algorithm
for Brown for {\bf Z}$[x_1,\ldots,x_n]$ and Encarnacion for {\bf
@@ -9631,8 +9632,8 @@ when shown in factored form.
author = "Baez, John C.; Stay, Mike",
title = {{Physics, Topology, Logic and Computation: A Rosetta Stone}},
link = "\url{http://arxiv.org/pdf/0903.0340v3.pdf}",
 abstract = "
 In physics, Feynman diagrams are used to reason about quantum
+ abstract =
+ "In physics, Feynman diagrams are used to reason about quantum
processes. In the 1980s, it became clear that underlying these
diagrams is a powerful analogy between quantum physics and
topology. Namely, a linear operator behaves very much like a
@@ -10111,7 +10112,8 @@ when shown in factored form.
excluded middle similar to Brouwer's had been advanced in print by
Jules Molk two years before. Finally, we discuss the influence on
George Griss' negationless mathematics",
 paper = "Atte15.pdf, printed"
+ paper = "Atte15.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ -12535,10 +12537,21 @@ when shown in factored form.
\end{chunk}
\section{Coercion Survey  Fall 2018}
+\section{Coercion Survey  Fall 2018}
\subsection{A} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Altenkirch, Thorsten}
+\begin{chunk}{axiom.bib}
+@misc{Alte18,
+ author = "Altenkirch, Thorsten",
+ title = {{Naive Type Theory}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=bNG53SA4n48p}"
+}
+
+\end{chunk}
+
\index{Arkoudas, Konstantine}
\index{Musser, David}
\begin{chunk}{axiom.bib}
@@ -12552,6 +12565,67 @@ when shown in factored form.
\end{chunk}
+\index{Armstrong, Alasdair}
+\index{Bauereiss, Thomas}
+\index{Campbell, Brian}
+\index{Reid, Alastair}
+\index{Gray, Kathryn E.}
+\index{Norton, Robert M.}
+\index{Mundkur, Prashanth}
+\index{Wassell, Mark}
+\index{French, Jon}
+\index{Pulte, Christopher}
+\index{Flur, Shaked}
+\index{Krishnaswami, Neel}
+\index{Sewell, Peter}
+\begin{chunk}{axiom.bib}
+@article{Arms19,
+ author = "Armstrong, Alasdair and Bauereiss, Thomas and Campbell, Brian
+ and Reid, Alastair and Gray, Kathryn E. and Norton, Robert M.
+ and Mundkur, Prashanth and Wassell, Mark and French, Jon
+ and Pulte, Christopher and Flur, Shaked and Krishnaswami, Neel
+ and Sewell, Peter",
+ title = {{ISA Semantics for ARMv8A, RISCV, and CHERIMIPS}},
+ journal = "Proc. ACM Programming Lang.",
+ volume = "3",
+ year = "2019",
+ abstract =
+ "Architecture specifications notionally define the fundamental
+ interface between hardware and software: the envelope of allowed
+ behaviour for processor implementations, and the basic assumptions for
+ software development and verification. But in practice, they are
+ typically prose and pseudocode documents, not rigorous or executable
+ artifacts, leaving software and verification on shaky ground.
+
+ In this paper, we present rigorous semantic models for the sequential
+ behaviour of large parts of the mainstream ARMv8A, RISCV, and MIPS
+ architectures, and the research CHERIMIPS architecture, that are
+ complete enough to boot operating systems, variously Linux, FreeBSD,
+ or seL4. Our ARMv8A models are automatically translated from
+ authoritative ARMinternal definitions, and (in one variant) tested
+ against the ARM Architecture Validation Suite.
+
+ We do this using a custom language for ISA semantics, Sail, with a
+ lightweight dependent type system, that supports automatic generation
+ of emulator code in C and OCaml, and automatic generation of
+ proofassistant definitions for Isabelle, HOL4, and (currently only
+ for MIPS) Coq. We use the former for validation, and to assess
+ specification coverage. To demonstrate the usability of the latter, we
+ prove (in Isabelle) correctness of a purely functional
+ characterisation of ARMv8A address translation. We moreover integrate
+ the RISCV model into the RMEM tool for (usermode) relaxedmemory
+ concurrency exploration. We prove (on paper) the soundness of the core
+ Sail type system.
+
+ We thereby take a big step towards making the architectural
+ abstraction actually welldefined, establishing foundations for
+ verification and reasoning.",
+ paper = "Arms19.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Aspinall, David}
\index{Compagnoni, Adriana}
\begin{chunk}{axiom.bib}
@@ -12582,8 +12656,157 @@ when shown in factored form.
\end{chunk}
+\index{Atkey, Robert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Atke18,
+ author = "Atkey, Robert",
+ title = {{Syntax and Semantics of Quantitative Type Theory}},
+ booktitle = "LICS",
+ year = "2018",
+ publisher = "ACM",
+ isbn = "9781450355834",
+ abstract =
+ "We present Quantitative Type Theory, a Type Theory that records
+ usage information for each variable in a judgement, based on a
+ previous system by McBride. The usage information is used to give
+ a realizability semantics using a variant of Linear Combinatory
+ Algebras, refining the usual realizability semantics of Type
+ Theory by accurately tracking resource behaviour. We define the
+ semantics in terms of Quantitative Categories with Families, a
+ novel extension of Categories with Families for modelling resource
+ sensitive type theories.",
+ paper = "Atke18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{B} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Baker, Henry G.}
+\begin{chunk}{axiom.bib}
+@article{Bake92,
+ author = "Baker, Henry G.",
+ title = {{A Decision Procedure for Common Lisp's SUBTYPEP Predicate}},
+ journal = "Lisp and Symbolic Computation",
+ volume = "5",
+ number = "3",
+ year = "1992",
+ pages = "157190",
+ abstract =
+ "Common Lisp [CL84,CL90] includes a dynamic datatype system of
+ moderate complexity, as well as predicates for checking the types of
+ language objects. Additionally, an interesting predicate of two 'type
+ specifiers'— SUBTYPEP —is included in the language. This subtypep
+ predicate provides a mechanism with which to query the Common Lisp
+ type system regarding containment relations among the various builtin
+ and userdefined types. While subtypep is rarely needed by an
+ applications programmer, the efficiency of a Common Lisp
+ implementation can depend critically upon the quality of its subtypep
+ predicate: the runtime system typically calls upon subtypep to decide
+ what sort of representations to use when making arrays; the compiler
+ calls upon subtypep to interpret user declarations, on which efficient
+ data representation and code generation decisions are based.
+
+ As might be expected due to the complexity of the Common Lisp type
+ system, there may be type containment questions which cannot be
+ decided. In these cases subtypep is expected to return 'can't
+ determine', in order to avoid giving an incorrect
+ answer. Unfortunately, most Common Lisp implementations have abused
+ this license by answering 'can't determine' in all but the most
+ trivial cases. In particular, most Common Lisp implementations of
+ SUBTYPEP fail on the basic axioms of the Common Lisp type system
+ itself [CL84,p.33]. This situation is particularly embarrassing for
+ Lisp—the premier 'symbol processing language'—in which the
+ implementation of complex symbolic logical operations should be
+ relatively easy. Since subtypep was presumably included in Common Lisp
+ to answer the hard cases of type containment, this 'lazy evaluation'
+ limits the usefulness of an important language feature.
+
+ This paper shows how those type containment relations of Common Lisp
+ which can be decided at all, can be decided simply and quickly by a
+ decision procedure which can dramatically reduce the number of
+ occurrences of the 'can't determine' answer from subtypep . This
+ decision procedure does not require the conversion of a type specifier
+ expression to conjunctive or disjunctive normal form, and therefore
+ does not incur the exponential explosion in space and time that such a
+ conversion would entail.
+
+ The lattice mechanism described here for deciding subtypep is also
+ ideal for performing type inference [Baker90]; the particular
+ implementation developed here, however, is specific to the type system
+ of Common Lisp [Beer88].",
+ paper = "Bake92.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Balzer, Stephanie}
+\index{Pfenning, Frank}
+\begin{chunk}{axiom.bib}
+@inproceedings{Balz17,
+ author = "Balzer, Stephanie and Pfenning, Frank",
+ title = {{Manifest Sharing with Session Types}},
+ booktitle = "Proc. ACM Program. Lang.",
+ publisher = "ACM",
+ year = "2017",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/icfp17.pdf}",
+ abstract =
+ "Sessiontyped languages building on the CurryHoward isomorphism
+ between linear logic and sessiontyped communication guarantee session
+ fidelity and deadlock freedom. Unfortunately, these strong guarantees
+ exclude many naturally occurring programming patterns pertaining to
+ shared resources. In this paper, we introduce sharing into a
+ sessiontyped language where types are stratified into linear and
+ shared layers with modal operators connecting the layers. The
+ resulting language retains session fidelity but not the absence of
+ deadlocks, which can arise from contention for shared processes. We
+ illustrate our language on various examples, such as the dining
+ philosophers problem, and provide a translation of the untyped
+ asynchronous $\Pi$calculus into our language.",
+ paper = "Balz17.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Balzer, Stephanie}
+\index{Pfenning, Frank}
+\index{Toninho, Bernardo}
+\begin{chunk}{axiom.bib}
+@article{Balz18,
+ author = "Balzer, Stephanie and Pfenning, Frank and Toninho, Bernardo",
+ title = {{A Universal Session Type for Untyped Asynchronous Communication}},
+ journal = "CONCUR",
+ volume = "30",
+ number = "1",
+ year = "2018",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/univtype18.pdf}",
+ abstract =
+ "In the simplytyped $\lambda$calculus we can recover the full
+ range of expressiveness of the untyped $\lambda$calculus solely
+ by adding a single recursive type
+ $U = U\rightarrow U$. In contrast,
+ in the sessiontyped $\Pi$calculus, recursion alone is
+ insufficient to recover the untyped $\Pi$calculus, primarily due
+ to linearity: each channel just has two unique endpoints. In this
+ paper, we show that shared channels with a corresponding sharing
+ semantics (based on the language SILL$_s$ developed in prior
+ work) are enough to embed the untyped asynchronous $\Pi$calculus
+ via a universal shared session type $U_s$. We show that
+ our encoding of the asynchronous $\Pi$calculus satisfies
+ operational correspondence and preserves observable actions (i.e.,
+ processes are weakly bisimilar to their encoding). Moreover, we
+ clarify the expressiveness of SILL$_s$ by developing an
+ operationally correct encoding of SILL$_s$ in the asynchronous
+ $\Pi$calculus.",
+ paper = "Balz18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Barendregt, Hendrik Pieter}
\begin{chunk}{axiom.bib}
@article{Bare91,
@@ -12614,6 +12837,349 @@ when shown in factored form.
\end{chunk}
+\index{Barwise, Jon}
+\index{Moss, Lawrence}
+\begin{chunk}{axiom.bib}
+@book{Barw96,
+ author = "Barwise, Jon and Moss, Lawrence",
+ title = {{Vicious Circles}},
+ publisher = "CSLI Publications",
+ year = "1996",
+ isbn = "1575860082"
+}
+
+\end{chunk}
+
+\index{Beer, Randall D.}
+\begin{chunk}{axiom.bib}
+@article{Beer87,
+ author = "Beer, Randall D.",
+ title = {{Preliminary Report on A Practical Type Inference System
+ for Common Lisp}},
+ journal = "Lisp Pointers",
+ volume = "1",
+ number = "2",
+ year = "1987",
+ pages = "511",
+ paper = "Beer87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Boas, Peter van Emde}
+\begin{chunk}{axiom.bib}
+@article{Boas12,
+ author = "Boas, Peter van Emde",
+ title = {{Turing Machines for Dummies}},
+ journal = "LNCS",
+ volume = "7147",
+ pages = "1430",
+ year = "2012",
+ abstract =
+ "Various methods exists in the litearture for denoting the con
+ figuration of a Turing Machine. A key difference is whether the head
+ position is indicated by some integer (mathematical representation) or
+ is specified by writing the machine state next to the scanned tape
+ symbol (intrinsic representation).
+
+ From a mathematical perspective this will make no difference. However,
+ since Turing Machines are primarily used for proving undecidability
+ and/or hardness results these representations do matter. Based on
+ a number of applications we show that the intrinsic representation
+ should be preferred.",
+ paper = "Boas12.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Bohrer, Brandon}
+\index{Crary, Karl}
+\begin{chunk}{axiom.bib}
+@article{Bohr17,
+ author = "Bohrer, Brandon and Crary, Karl",
+ title = {{TWAM: A Certifying Abstract Machine for Logic Programs}},
+ journal = "ACM Trans. on Computational Logic",
+ volume = "1",
+ number = "1",
+ year = "2017",
+ link = "\url{https://arxiv.org/pdf/1801.00471.pdf}",
+ abstract =
+ "Typepreserving (or typed) compilation uses typing derivations to
+ certify correctness properties of compilation. We have designed and
+ implemented a typepreserving compiler for a simplytyped dialect of
+ Prolog we call TProlog. The crux of our approach is a new certifying
+ abstract machine which we call the Typed Warren Abstract Machine
+ (TWAM). The TWAM has a dependent type system strong enough to specify
+ the semantics of a logic program in the logical framework LF. We
+ present a soundness metatheorem which constitutes a partial
+ correctness guarantee: welltyped programs implement the logic
+ program specified by their type. This metatheorem justifies our design
+ and implementation of a certifying compiler from TProlog to TWAM.",
+ paper = "Bohr17.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Boolos, George S.}
+\index{Burgess, John P.}
+\index{Jeffrey, Richard C.}
+\begin{chunk}{axiom.bib}
+@book{Bool07,
+ author = "Boolos, George S. and Burgess, John P. and Jeffrey, Richard C.",
+ title = {{Computability and Logic}},
+ publisher = "Cambridge University Press",
+ year = "2007",
+ isbn = "9780521701464"
+
+}
+
+\end{chunk}
+
+\index{Boyer, Robert S.}
+\index{Moore, J Strother}
+\begin{chunk}{axiom.bib}
+@book{Boye81a,
+ author = "Boyer, Robert S. and Moore, J Strother",
+ title = {{The Correctness Problem in Computer Science}},
+ publisher = "Academic Press",
+ year = "1981",
+ isbn = "0121229203"
+
+}
+
+\end{chunk}
+
+\index{Brady, Edwin C.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Brad05,
+ author = "Brady, Edwin C.",
+ title = {{Practical Implementation of a Dependently Typed Functional
+ Programming Language}},
+ school = "University of Durham",
+ year = "2005",
+ link = "\url{https://eb.host.cs.standrews.ac.uk/writings/thesis.pdf}",
+ abstract =
+ "Types express a program’s meaning, and checking types ensures that a
+ program has the intended meaning. In a dependently typed programming
+ language types are predicated on values, leading to the possibility of
+ expressing invariants of a program’s behaviour in its type. Dependent
+ types allow us to give more detailed meanings to programs, and hence
+ be more confident of their correctness.
+
+ This thesis considers the practical implementation of a dependently
+ typed programming language, using the Epigram notation defined by
+ McBride and McKinna. Epigram is a high level notation for dependently
+ typed functional programming elaborating to a core type theory based
+ on Luo’s UTT, using Dybjer’s inductive families and elimination rules
+ to implement pattern matching. This gives us a rich framework for
+ reasoning about programs. However, a naive implementation introduces
+ several runtime overheads since the type system blurs the
+ distinction between types and values; these overheads include the
+ duplication of values, and the storage of redundant information and
+ explicit proofs.
+
+ A practical implementation of any programming language should be as
+ efficient as possible; in this thesis we see how the apparent
+ efficiency problems of dependently typed programming can be overcome
+ and that in many cases the richer type information allows us to apply
+ optimisations which are not directly available in traditional
+ languages. I introduce three storage optimisations on inductive
+ families; forcing, detagging and collapsing. I further introduce a
+ compilation scheme from the core type theory to Gmachine code,
+ including a pattern matching compiler for elimination rules and a
+ compilation scheme for efficient run time implementation of Peano’s
+ natural numbers. We also see some low level optimisations for removal
+ of identity functions, unused arguments and impossible case branches.
+ As a result, we see that a dependent type theory is an effective base
+ on which to build a feasible programming language.",
+ paper = "Brad05.pdf"
+}
+
+\end{chunk}
+
+\index{Brady, Edwin}
+\begin{chunk}{axiom.bib}
+@misc{Brad17,
+ author = "Brady, Edwin",
+ title = {{Dependent Types in the Idris Programming Language 1}},
+ year = "2017",
+ link = "\url{https://www.cs.uoregon.edu/research/summerschool/summer17/topics.php}"
+
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\index{Steele, Guy L.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo82,
+ author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
+ title = {{S1 Common Lisp Implementation}},
+ booktitle = "Proc ACM Symp. on Lisp and functional programming",
+ publisher = "ACM",
+ year = "1982",
+ isbn = "0897910826",
+ abstract =
+ "We are developing a Lisp implementation for the Lawrence
+ Livermore National Laboratory S1 Mark IIA computer. The dialect
+ of Lisp is an extension of COMMON Lisp [Steele;1982], a descendant
+ of Maclisp [Moon;1974] and Lisp Machine Lisp [Weinreb;1981].",
+ paper = "Broo82.pdf",
+ keywords = "printed"
+
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\index{Steele, Guy L.}
+\begin{chunk}{ignore} %{axiom.bib}
+@article{Broo82a,
+ author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
+ title = {{An Optimizing Compiler for Lexically Scoped LISP}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "17",
+ number = "6",
+ year = "1982",
+ pages = "261275',
+ abstract =
+ "We are developing an optimizing compiler for a dialect of the
+ LISP language. The current target architecture is the S1, a
+ multiprocessing supercomputer designed at Lawrence Livermore
+ National Laboratory. While LISP is usually thought of as a language
+ primarily for symbolic processing and list manipulation, this
+ compiler is also intended to compete with the S1 PASCAL and
+ FORTRAN compilers for quality of compiled numerical code. The S1
+ is designed for extremely highspeed signal processing as well as
+ for symbolic computation; it provides primitive operations on
+ vectors of floatingpoint and complex numbers. The LISP compiler
+ is designed to exploit the architecture heavily.
+
+ The compiler is structurally and conceptually similar to the
+ BLISS11 compiler and the compilers produced by PQCC. In
+ particular, the TNBIND technique has been borrowed and extended.
+
+ Particularly interesting properties of the compiler are:
+ \begin{itemize}
+ \item Extensive use of sourcetosource transformation
+ \item Use of an intermediate form that is expressionoriented
+ rather than statement oriented
+ \item Exploitation of tailrecursive function calls to represent
+ complex control structures
+ \item Efficient compilation of code that can manipulate procedural
+ object that require heapallocated environments
+ \item Smooth runtime interfacing between the ``numerical world''
+ and ``LISP pointer world'', including automatic stack allocation
+ of objects that ordinarily must be heap-allocated
+ \end{itemize}
+
+ Each of these techniques has been used before, but we believe
+ their synthesis to be original and unique.
+
+ The compiler is tabledriven to a great extent, more so than
+ BLISS11 but less so than a PQCC compiler. We expect to be able to
+ redirect the compiler to other target architectures such as the
+ VAX or PDP10 with relatively little effort.",
+ paper = "Broo82a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo84,
+ author = "Brooks, Rodney A. and Gabriel, Richard P.",
+ title = {{A Critique of Common Lisp}},
+ booktitle = "Symposium on Lisp and Functional Programming",
+ pages = "18",
+ year = "1984",
+ abstract =
+ "A major goal of the COMMON LISP committee was to define a Lisp
+ language with sufficient power and generality that people would be
+ happy to stay within its confines and thus write inherently
+ transportable code. We argue that the resulting language
+ definition is too large for many shortterm and mediumterm
+ potential applications. In addition many parts of COMMON LISP
+ cannot be implemented very efficiently on stock hardware. We
+ further argue that the very generality of the design with its
+ different efficiency profiles on different architectures works
+ against the goal of transportability.",
+ paper = "Broo84.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Posner, David B.}
+\index{McDonald, James L.}
+\index{White, Jon L.}
+\index{Benson, Eric}
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo86,
+ author = "Brooks, Rodney A. and Posner, David B. and McDonald, James L.
+ and White, Jon L. and Benson, Eric and Gabriel, Richard P.",
+ title = {{Design of An Optimising, Dynamically Retargetable Compiler
+ for Common Lisp}},
+ booktitle = "Conf. on Lisp and Functional Programming",
+ publisher = "ACM",
+ pages = "6785",
+ year = "1986",
+ abstract =
+ "We outline the components of a retargetable crosscompiler for
+ the Common Lisp language. A description is given of a method for
+ modeling the various hardware features in the compiler's database,
+ and a breakdown is shown of the compiler itself into various
+ machineindependent and machinedependent modules. A novel feature
+ of this development is the dynamic nature of the retargeting:
+ Databases for multiple hardware architectures are a standard part
+ of the compiler, and the internal interfaces used by the compiler
+ are such that the machinedependent modules may be instantly
+ switched from one to another. Examples of generated code in
+ several environments will be given to demonstrate the high quality
+ of the output available, even under this very modular approach.",
+ paper = "Broo86.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Burstall, R.M.}
+\begin{chunk}{axiom.bib}
+@article{Burs69,
+ author = "Burstall, R.M.",
+ title = {{Proving Properties of Programs by Structural Induction}},
+ journal = "Computer Journal",
+ volume = "12",
+ number = "1",
+ pages = "4148",
+ link = "\url{http://www.cse.chalmers.se/edu/year/2010/course/DAT140_Types/Burstall.pdf}",
+ year = "1969",
+ abstract =
+ "This paper discusses the technique of structural induction for
+ proving theorems about programs. This technique is closely related to
+ recursion induction but makes use of the inductive definition of the
+ data structures handled by the programs. It treats programs with
+ recursion but without assignments or jumps. Some syntactic extensions
+ to Landin's functional programming language ISWIM are suggested which
+ make it easier to program the manipulation of data structures and to
+ develop proofs about such programs. Two sample proofs are given to
+ demonstrate the technique, one for a tree sorting algorithm and one
+ for a simple compiler for expressions.",
+ paper = "Burs69.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{C} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{Callaghan, Paul}
@@ -12656,6 +13222,151 @@ when shown in factored form.
\end{chunk}
+\index{Christiansen, David Thrane}
+\begin{chunk}{axiom.bib}
+@misc{Chri18,
+ author = "Christiansen, David Thrane",
+ title = {{A Little Taste of Dependent Types}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=VxINoKFmS4}",
+ abstract =
+ "Dependent types let us use the same programming language for
+ compiletime and runtime code, and are inching their way towards the
+ mainstream from research languages like Coq, Agda and Idris. Dependent
+ types are useful for programming, but they also unite programming and
+ mathematical proofs, allowing us to use the tools and techniques we
+ know from programming to do math.
+
+ The essential beauty of dependent types can sometimes be hard to find
+ under layers of powerful automatic tools. The Little Typer is an
+ upcoming book on dependent types in the tradition of the The Little
+ Schemer that features a tiny dependently typed language called Pie. We
+ will demonstrate a proof in Pie that is also a program."
+}
+
+\end{chunk}
+
+\index{Chow, Timothy Y.}
+\begin{chunk}{axiom.bib}
+@misc{Chow18,
+ author = "Chow, Timothy Y.",
+ title = {{The Consistency of Arithmetic}},
+ year = "2018",
+ link = "\url{http://timothychow.net/consistent.pdf}",
+ paper = "Chow18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Christiansen, David Thrane}
+\begin{chunk}{axiom.bib}
+@misc{Chri18a,
+ author = "Christiansen, David Thrane",
+ title = {{Coding for Types: The Universe Pattern in Idris}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=AWeT_G04a0A}"
+}
+
+\end{chunk}
+
+\index{Church, Alonzo}
+\begin{chunk}{axiom.bib}
+@book{Chur41,
+ author = "Church, Alonzo",
+ title = {{The Calculi of Lambda Conversion}},
+ year = "1941",
+ publisher = "Princeton University Press",
+ paper = "Alon41*.pdf"
+}
+
+\end{chunk}
+
+\index{Clochard, Martin}
+\index{Gondelman, Leon}
+\index{Pereira, Mario}
+\begin{chunk}{axiom.bib}
+@article{Cloc18,
+ author = "Clochard, Martin and Gondelman, Leon and Pereira, Mario",
+ title = {{The Matrix Reproved}},
+ journal = "Journal of Automated Reasoning",
+ volume = "60",
+ number = "3",
+ pages = "365383",
+ year = "2018",
+ link = "\url{https://hal.inria.fr/hal01617437/document}",
+ abstract =
+ "In this paper we describe a complete solution for the
+ first challenge of the VerifyThis 2016 competition held at the 18th
+ ETAPS Forum. We present the proof of two variants for the
+ multiplication of matrices: a naive version using three nested loops
+ and Strassen's algorithm. The proofs are conducted using the Why3
+ platform for deductive program verification and automated theorem
+ provers to discharge proof obligations. In order to specify and
+ prove the two multiplication algorithms, we develop a new Why3
+ theory of matrices. In order to prove the matrix identities on which
+ Strassen's algorithm is based, we apply the proof by reflection
+ methodology, which we implement using ghost state. To our knowledge,
+ this is the first time such a methodology is used under an
+ autoactive setting.",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Clocksin, William F.}
+\begin{chunk}{axiom.bib}
+@book{Cloc91,
+ author = "Clocksin, William F.",
+ title = {{Clause and Effect}},
+ year = "1991",
+ publisher = "Springer",
+ isbn = "3540629718"
+}
+
+\end{chunk}
+
+\index{Copeland, B. Jack}
+\begin{chunk}{axiom.bib}
+@book{Cope04,
+ author = "Copeland, B. Jack",
+ title = {{The Essential Turing}},
+ publisher = "Oxford University Press",
+ year = "2004",
+ isbn = "9780198250807"
+}
+
+\end{chunk}
+
+\index{Cosmo, Roberto Di}
+\begin{chunk}{axiom.bib}
+@book{Cosm95,
+ author = "Cosmo, Roberto Di",
+ title = {{Isomorphisms of Types}},
+ publisher = "Birkhauser",
+ year = "1995",
+ isbn = "081763763X"
+}
+
+\end{chunk}
+
+\index{Cousot, Patrick}
+\index{Cousot, Radhia}
+\begin{chunk}{axiom.bib}
+@inproceedings{Cous77,
+ author = "Cousot, Patrick and Cousot, Radhia",
+ title = {{Abstract Interpretation: A Unified Lattice Model for
+ Static Analysis of Programs by Construction or
+ Approximation of Fixpoints}},
+ booktitle = "Symp. on Principles of Programming Languages",
+ pages = "238252",
+ year = "1977",
+ paper = "Cous77.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{D} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{Dahl, O.J.}
@@ 12672,11 +13383,171 @@ when shown in factored form.
\end{chunk}
+\index{Damas, Luis}
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Dama82,
+ author = "Damas, Luis and Milner, Robin",
+ title = {{Principal Type-schemes for Functional Programs}},
+ booktitle = "Proc POPL '82",
+ year = "1982",
+ pages = "207212",
+ paper = "Dama82.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Dershowitz, Nachum}
+\index{Gurevich, Yuri}
+\begin{chunk}{axiom.bib}
+@article{Ders08,
+ author = "Dershowitz, Nachum and Gurevich, Yuri",
+ title = {{A Natural Axiomatization of Computability and Proof of
+ Church's Thesis}},
+ journal = "Bulletin of Symbolic Logic",
+ volume = "14",
+ number = "3",
+ year = "2008",
+ abstract =
+ "Church’s Thesis asserts that the only numeric functions that can be
+ calculated by effective means are the recursive ones, which are the
+ same, extensionally, as the Turing computable numeric functions. The
+ Abstract State Machine Theorem states that every classical algorithm
+ is behaviorally equivalent to an abstract state machine. This theorem
+ presupposes three natural postulates about algorithmic
+ computation. Here, we show that augmenting those postulates with an
+ additional requirement regarding basic operations gives a natural
+ axiomatization of computability and a proof of Church’s Thesis, as
+ Gödel and others suggested may be possible. In a similar way, but with
+ a different set of basic operations, one can prove Turing’s Thesis,
+ characterizing the effective string functions, and—in particular—the
+ effectively computable functions on string representations of numbers.",
+ paper = "Ders08.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger W.}
+\index{Feijen, W.H.J}
+\begin{chunk}{axiom.bib}
+@book{Dijk88,
+ author = "Dijkstra, Edsger W. and Feijen, W.H.J",
+ title = {{A Method of Programming}},
+ publisher = "Addison Wesley",
+ year = "1988",
+ isbn = "0201175363"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger W.}
+\begin{chunk}{axiom.bib}
+@misc{Dijk70,
+ author = "Dijkstra, Edsger W.",
+ title = {{Concern for Correctness as a Guiding Principle for Program
+ Composition}},
+ year = "1970",
+ comment = "EWD288",
+ link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD02xx/EWD288.html}",
+ paper = "Dijk70.html",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger W.}
+\index{Scholten, Carel S.}
+\begin{chunk}{axiom.bib}
+@book{Dijk90,
+ author = "Dijkstra, Edsger W. and Scholten, Carel S.",
+ title = {{Predicate Calculus and Program Semantics}},
+ publisher = "Springer",
+ year = "1990",
+ isbn = "0387969578"
+}
+
+\end{chunk}
+
\subsection{E} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Ellis, Ferris}
+\begin{chunk}{axiom.bib}
+@misc{Elli18,
+ author = "Ellis, Ferris",
+ title = {{Strength in Numbers: Unums and the Quest for Reliable
+ Arithmetics}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=nVNYjmj_qbY}",
+ abstract =
+ "In the land of computer arithmetic, a tyrant has ruled since its very
+ beginning: the floating point number. Under its rule we have all
+ endured countless hardships and cruelties. To this very day the
+ floating point number still denies 0.1 + 0.2 == 0.3 and returns
+ insidious infinities to software developers everywhere. But a new hero
+ has entered fray: the universal number (unum). Can it topple the float
+ number system and its century long reign?
+
+ This talk will introduce unums, explain their benefits over floating
+ point numbers, and examine multiple real world examples comparing the
+ two. For those not familiar with floating point numbers and their
+ pitfalls, this talk also includes a primer on the topic. Code examples
+ are in Rust, though strong knowledge of the language is not needed."
+}
+
+\end{chunk}
+
\subsection{F} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Filliatre, JeanChristophe}
+\begin{chunk}{axiom.bib}
+@misc{Fill13a,
+ author = "Filliatre, JeanChristophe",
+ title = {{Deductive Program Verification}},
+ year = "2013",
+ comment = "slides"
+}
+
+\end{chunk}
+
+\index{Fredrikson, Matt}
+\begin{chunk}{axiom.bib}
+@misc{Fred16,
+ author = "Fredrikson, Matt",
+ title = {{Automated Program Verification and Testing}},
+ comment = "slides",
+ year = "2016"
+}
+
+\end{chunk}
+
\subsection{G} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@misc{Gabr10,
+ author = "Gabriel, Richard P.",
+ title = {{A Review of The Art of the Metaobject Protocol}},
+ year = "2010",
+ paper = "Gabr10.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Goldblatt, Robert}
+\begin{chunk}{axiom.bib}
+@book{Gold84,
+ author = "Goldblatt, Robert",
+ title = {{Topoi: The Categorical Analysis of Logic}},
+ year = "1984",
+ publisher = "Dover",
+ isbn = "9780486450261"
+}
+
+\end{chunk}
+
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
@@ -12786,8 +13657,185 @@ when shown in factored form.
\end{chunk}
+\index{Gurevich, Yuri}
+\begin{chunk}{axiom.bib}
+@article{Gure12,
+ author = "Gurevich, Yuri",
+ title = {{What Is An Algorithm?}},
+ journal = "LNCS",
+ volume = "7147",
+ pages = "3142",
+ year = "2012",
+ abstract =
+ "We attempt to put the title problem and the ChurchTuring thesis into
+ a proper perspective and to clarify some common misconceptions
+ related to Turing’s analysis of computation. We examine two approaches
+ to the title problem, one wellknown among philosophers and another
+ among logicians.",
+ paper = "Gure12.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Gurevich, Yuri}
+\begin{chunk}{axiom.bib}
+@misc{Gure18,
+ author = "Gurevich, Yuri",
+ title = {{Church-Turing Thesis Cannot Possibly Be True}},
+ year = "2018",
+ link =
+ "\url{https://www.microsoft.com/en-us/research/video/church-turing-thesis-cannot-possibly-be-true/}",
+ abstract =
+ "The thesis asserts this: If an algorithm A computes a partial
+ function f from natural numbers to natural numbers then f is partially
+ recursive, i.e., the graph of f is recursively enumerable.
+
+ The thesis was formulated in the 1930s. The only algorithms at the
+ time were sequential algorithms. Sequential algorithms were
+ axiomatized in 2000. This axiomatization was used in 2008 to prove the
+ thesis for sequential algorithms, i.e., for the case where A ranges
+ over sequential algorithms.
+
+ These days, in addition to sequential algorithms, there are parallel
+ algorithms, distributed algorithms, probabilistic algorithms, quantum
+ algorithms, learning algorithms, etc.
+
+ The question whether the thesis is true in full generality has been
+ actively discussed since the 1960s. We argue that, in full generality,
+ the thesis cannot possibly be true."
+}
+
+\end{chunk}
+
\subsection{H} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Hahnle, Reiner}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Hahn04,
+ author = "Hahnle, Reiner and Wallenburg, Angela",
+ title = {{Using a Software Testing Technique to Improve Theorem Proving}},
+ year = "2004",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ comment = "paper follows thesis in file",
+ abstract =
+ "Most efforts to combine formal methods and software testing go in the
+ direction of exploiting formal methods to solve testing problems, most
+ commonly test case generation. Here we take the reverse viewpoint and
+ show how the technique of partition testing can be used to improve a
+ formal proof technique (induction for correctness of loops). We first
+ compute a partition of the domain of the induction variable, based on
+ the branch predicates in the program code of the loop we wish to
+ prove. Based on this partition we derive mechanically a partitioned
+ induction rule, which is (hopefully) easier to use than the standard
+ induction rule. In particular, with an induction rule that is
+ tailored to the program to be verified, less user interaction can be
+ expected to be required in the proof. We demonstrate with a number of
+ examples the effectiveness of our method.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
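+As a schematic illustration of the idea (ours, not taken from the
+paper): if the branch predicates of a loop split the domain of the
+induction variable $n$ at a constant $k$, the standard rule
+\[ P(0) \wedge \forall n\,(P(n) \rightarrow P(n+1))
+   \rightarrow \forall n\, P(n) \]
+can be replaced by a partitioned rule that handles each region with
+its own, simpler obligation:
+\[ \forall n\,(n < k \rightarrow P(n)) \wedge P(k) \wedge
+   \forall n\,(n \geq k \wedge P(n) \rightarrow P(n+1))
+   \rightarrow \forall n\, P(n) \]
+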
+\index{Hantler, Sidney L.}
+\index{King, James C.}
+\begin{chunk}{axiom.bib}
+@article{Hant76,
+ author = "Hantler, Sidney L. King, James C.",
+ title = {{An Introduction to Proving the Correctness of Programs}},
+ journal = "ACM Computing Surveys",
+ volume = "8",
+ number = "3",
+ pages = "331353",
+ year = "1976",
+ abstract =
+ "This paper explains, in an introductory fashion, the method of
+ specifying the correct behavior of a program by the use of
+ input/output assertions and describes one method for showing that the
+ program is correct with respect to those assertions. An initial
+ assertion characterizes conditions expected to be true upon entry to
+ the program and a final assertion characterizes conditions expected to
+ be true upon exit from the program. When a program contains no
+ branches, a technique known as symbolic execution can be used to show
+ that the truth of the initial assertion upon entry guarantees the
+ truth of the final assertion upon exit. More generally, for a program
+ with branches one can define a symbolic execution tree. If there is
+ an upper bound on the number of times each loop in such a program may
+ be executed, a proof of correctness can be given by a simple traversal
+ of the (finite) symbolic execution tree.
+
+ However, for most programs, no fixed bound on the number of times each
+ loop is executed exists and the corresponding symbolic execution trees
+ are infinite. In order to prove the correctness of such programs, a
+ more general assertion structure must be provided. The symbolic
+ execution tree of such programs must be traversed inductively rather
+ than explicitly. This leads naturally to the use of additional
+ assertions which are called ``inductive assertions.''",
+ paper = "Hant76.pdf",
+ keywords = "printed",
+}
+
+\end{chunk}
+
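+A minimal sketch of the input/output-assertion style (ours, not from
+the paper), for a branching but loop-free program whose symbolic
+execution tree has exactly two paths:
+\begin{verbatim}
+def abs_diff(x, y):
+    # initial assertion: characterizes the expected inputs
+    assert isinstance(x, int) and isinstance(y, int)
+    if x >= y:           # path 1 of the symbolic execution tree
+        r = x - y
+    else:                # path 2
+        r = y - x
+    # final assertion: holds on both paths, so the program is
+    # correct with respect to the two assertions
+    assert r == abs(x - y) and r >= 0
+    return r
+\end{verbatim}
+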
+\index{Hargreaves, G.}
+\begin{chunk}{axiom.bib}
+@mastersthesis{Harg02a,
+ author = "Hargreaves, G.",
+ title = {{Interval analysis in MATLAB}},
+ school = "University of Manchester, Dept. of Mathematics",
+ year = "2002"
+}
+
+\end{chunk}
+
+\index{Harper, Robert}
+\index{Stone, Christopher}
+\begin{chunk}{axiom.bib}
+@techreport{Harp97,
+ author = "Harper, Robert and Stone, Christopher",
+ title = {{An Interpretation of Standard ML in Type Theory}},
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMUCS97147",
+ year = "1997",
+ abstract =
+ "We define an interpretation of Standard ML into type theory. The
+ interpretation takes the form of a set of elaboration rules
+ reminiscent of the static semantics given in {\sl The Definition
+ of Standard ML}. In particular, the elaboration rules are given in
+ a relational style, exploiting indeterminacy to avoid
+ overcommitment to specific implementation techniques. Elaboration
+ consists of identifier scope resolution, type checking and type
+ inference, expansion of derived forms, pattern compilation,
+ overloading resolution, equality compilation, and the coercive
+ aspects of signature matching.
+
+ The underlying type theory is an explicitly-typed
+ $\lambda$-calculus with product, sum, function, and recursive
+ types, together with module types derived from the translucent sum
+ formalism of Harper and Lillibridge. Programs of the type theory
+ are given a type-passing dynamic semantics compatible with
+ constructs such as polymorphic equality that rely on type analysis
+ at runtime.",
+ paper = "Harp97.pdf"
+}
+
+\end{chunk}
+
+\index{Harper, Robert}
+\begin{chunk}{axiom.bib}
+@book{Harp16,
+ author = "Harper, Robert",
+ title = {{Practical Foundations for Programming Languages}},
+ publisher = "Cambridge University Press",
+ year = "2016",
+ isbn = "9781107150300"
+}
+
+\end{chunk}
+
\index{Harper, Robert}
\begin{chunk}{axiom.bib}
@misc{Harp18,
@@ -12801,13 +13849,396 @@ when shown in factored form.
\end{chunk}
+\index{Haynes, Christopher T.}
+\index{Friedman, Daniel P.}
+\begin{chunk}{axiom.bib}
+@techreport{Hayn87,
+ author = "Haynes, Christopher T. and Friedman, Daniel P.",
+ title = {{Embedding Continuations in Procedural Objects}},
+ type = "technical report",
+ institution = "Indiana University",
+ number = "213",
+ year = "1987",
+ abstract =
+ "Continuations, when available as firstclass objects, provide a
+ general control abstraction in programming languages. They
+ liberate the programmer from specific control structures,
+ increasing programming language extensibility. Such continuations
+ may be extended by embedding them in procedural objects. This
+ technique is first used to restore a fluid environment when a
+ continuation object is invoked. We then consider techniques for
+ constraining the power of continuations in the interest of
+ security and efficiency. Domain mechanisms, which create dynamic
+ barriers for enclosing control, are implemented using
+ fluids. Domains are then used to implement an unwind-protect
+ facility in the presence of first-class continuations. Finally, we
+ present two mechanisms, wind-unwind and dynamic-wind, that
+ generalize unwind-protect.",
+ paper = "Hayn87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
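+The unwind-protect facility discussed above corresponds to
+try/finally in mainstream languages; a rough analogue (our sketch,
+which ignores the first-class-continuation subtleties the paper
+addresses):
+\begin{verbatim}
+def with_unwind_protect(body, cleanup):
+    # cleanup runs however control leaves body: normal
+    # return or exception (but not a re-entry, which is
+    # exactly the case first-class continuations add)
+    try:
+        return body()
+    finally:
+        cleanup()
+
+print(with_unwind_protect(lambda: 42, lambda: print("cleanup")))
+# cleanup
+# 42
+\end{verbatim}
+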
+\index{van Heijenoort, Jean}
+\begin{chunk}{axiom.bib}
+@book{Heij67,
+ author = "van Heijenoort, Jean",
+ title = {{From Frege to Godel}},
+ publisher = "Harvard University Press",
+ isbn = "0674324498",
+ year = "1967"
+}
+
+\end{chunk}
+
+\index{Hemann, Jason}
+\index{Friedman, Daniel}
+\begin{chunk}{axiom.bib}
+@misc{Hema14,
+ author = "Hemann, Jason and Friedman, Daniel",
+ title = {{Write the Other Half of Your Program}},
+ year = "2014",
+ link = "\url{https://www.youtube.com/watch?v=RG9fBbQrVOM}"
+}
+
+\end{chunk}
+
+\index{Henglein, Friedrich}
+\begin{chunk}{axiom.bib}
+@phdthesis{Heng89,
+ author = "Henglein, Friedrich",
+ title = {{Polymorphic Type Inference and Semi-Unification}},
+ school = "Rutgers",
+ year = "1989",
+ link =
+ "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.388.1275&rep=rep1&type=pdf}",
+ abstract =
+ "In the last ten years declarationfree programming languages with
+ a polymorphic typing discipline (ML, B) have been developed to
+ approximate the flexibility and conciseness of dynamically typed
+ languages (LISP, SETL) while retaining the safety and execution
+ efficiency of conventional statically typed languages (Algol68,
+ Pascal). These polymorphic languages can be type checked at
+ compile time, yet allow functions whose arguments range over a
+ variety of types.
+
+ We investigate several polymorphic type systems, the most powerful
+ of which, termed the Milner-Mycroft Calculus, extends the so-called
+ let-polymorphism found in, e.g., ML with a polymorphic typing rule
+ for recursive definitions. We show that semi-unification, the
+ problem of solving inequalities over first-order terms,
+ characterizes type checking in the Milner-Mycroft Calculus up to
+ polynomial time, even in the restricted case where nested
+ definitions are disallowed. This permits us to extend some
+ infeasibility results for related combinatorial problems to type
+ inference and to correct several claims and statements in the
+ literature.
+
+ We prove the existence of unique most general solutions of term
+ inequalities, called most general semi-unifiers, and present an
+ algorithm for computing them that terminates for all known inputs
+ due to a novel ``extended occurs check''. We conjecture this
+ algorithm to be uniformly terminating even though, at present,
+ general semi-unification is not known to be decidable. We prove
+ termination of our algorithm for a restricted case of
+ semi-unification that is of independent interest.
+
+ Finally, we offer an explanation for the apparent practicality of
+ polymorphic type inference in the face of theoretical
+ intractability results.",
+ paper = "Heng89.pdf"
+}
+
+\end{chunk}
+
+\index{Herken, Rolf}
+\begin{chunk}{axiom.bib}
+@book{Herk88,
+ author = "Herken, Rolf",
+ title = {{The Universal Turing Machine}},
+ publisher = "Oxford Science Publications",
+ year = "1988",
+ isbn = "0198537743"
+}
+
+\end{chunk}
+
+\index{Herman, David}
+\begin{chunk}{axiom.bib}
+@inproceedings{Herm07,
+ author = "Herman, David",
+ title = {{Functional Pearl: The Great Escape}},
+ booktitle = "ICFP'07",
+ publisher = "ACM",
+ isbn = "1595938152",
+ year = "2007",
+ abstract =
+ "Filinski showed that callcc and a single mutable reference cell
+ are sufficient to express the delimited control operators shift
+ and reset. However, this implementation interacts poorly with
+ dynamic bindings like exception handlers. We present a variation
+ on Filinski's encoding of delimited continuations that behaves
+ appropriately in the presence of exceptions and give an
+ implementation in Standard ML of New Jersey. We prove the encoding
+ correct with respect to the semantics of delimited dynamic binding.",
+ paper = "Herm07.pdf"
+}
+
+\end{chunk}
+
+\index{Heyting, A.}
+\begin{chunk}{axiom.bib}
+@book{Heyt56,
+ author = "Heyting, A.",
+ title = {{Intuitionism: An Introduction}},
+ publisher = "North Holland",
+ year = "1956"
+}
+
+\end{chunk}
+
+\index{Hilbert, David}
+\index{Cohn-Vossen, S.}
+\begin{chunk}{axiom.bib}
+@book{Hilb52,
+ author = "Hilbert, David and Cohn-Vossen, S.",
+ title = {{Geometry and the Imagination}},
+ publisher = "Chelsea Publishing Company",
+ year = "1952"
+}
+
+\end{chunk}
+
\subsection{I} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{J} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Jeffrey, Richard}
+\begin{chunk}{axiom.bib}
+@book{Jeff81,
+ author = "Jeffrey, Richard",
+ title = {{Formal Logic: Its Scope and Limits}},
+ publisher = "McGrawHill",
+ year = "1981",
+ isbn = "0070323216"
+}
+
+\end{chunk}
+
\subsection{K} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Kahrs, Stefan}
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Kahr97,
+ author = "Kahrs, Stefan and Sannella, Donald and Tarlecki, Andrzej",
+ title = {{The Definition of Extended ML}},
+ journal = "Theoretical Computer Science",
+ volume = "173",
+ pages = "445484",
+ year = "1997",
+ abstract =
+ "This document formally defines the syntax and semantics of the
+ Extended ML language. It is based on the published semantics of
+ Standard ML in an attempt to ensure compatibility between the two
+ languages.",
+ paper = "Kahr97.pdf"
+}
+
+\end{chunk}
+
+\index{Kahrs, Stefan}
+\index{Sannella, Donald}
+\begin{chunk}{axiom.bib}
+@article{Kahr98,
+ author = "Kahrs, Stefan and Sannella, Donald",
+ title = {{Reflections on the Design of a Specification Language}},
+ journal = "LNCS",
+ volume = "1382",
+ pages = "154170",
+ year = "1998",
+ abstract =
+ "We reflect on our experiences from work on the design and
+ semantic underpinnings of Extended ML, a specification language
+ which supports the specification and formal development of
+ Standard ML programs. Our aim is to isolate problems and issues
+ that are intrinsic to the general enterprise of designing a
+ specification language for use with a given programming language.
+ Consequently the lessons learned go far beyond our original aim of
+ designing a specification language for ML.",
+ paper = "Karh98.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Kaplan, Marc A.}
+\index{Ullman, Jeffrey D.}
+\begin{chunk}{axiom.bib}
+@article{Kapl80,
+ author = "Kaplan, Marc A. and Ullman, Jeffrey D.",
+ title = {{A Scheme for the Automatic Inference of Variable Types}},
+ journal = "J. ACM",
+ volume = "27",
+ number = "1",
+ pages = "128-145",
+ year = "1980",
+ abstract =
+ "In this paper an algorithm for the determination of runtime
+ types in a programming language requiring no type declarations is
+ presented. It is demonstrated that this algorithm is superior to
+ other published algorithms in the sense that it produces stronger
+ assertions about the set of possible types for variables than do
+ other known algorithms. In fact this algorithm is shown to be the
+ best possible algorithm from among all those that use the same set
+ of primitive operators.",
+ paper = "Kapl80.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Kiczales, Gregor}
+\index{des Rivieres, Jim}
+\index{Bobrow, Daniel G.}
+\begin{chunk}{axiom.bib}
+@book{Kicz91,
+ author = "Kiczales, Gregor and des Rivieres, Jim and Bobrow, Daniel G.",
+ title = {{The Art of the Metaobject Protocol}},
+ publisher = "MIT Press",
+ year = "1991",
+ isbn = "0262610744"
+}
+
+\end{chunk}
+
+\index{Kleene, S.C.}
+\index{Vesley, R.E.}
+\begin{chunk}{axiom.bib}
+@book{Klee65,
+ author = "Kleene, S.C. and Vesley, R.E.",
+ title = {{The Foundations of Intuitionistic Mathematics}},
+ publisher = "NorthHolland Publishing Company",
+ year = "1965"
+}
+
+\end{chunk}
+
+\index{Kreitz, Christoph}
+\index{Rahli, Vincent}
+\begin{chunk}{axiom.bib}
+@techreport{Krei11,
+ author = "Kreitz, Christoph and Rahli, Vincent",
+ title = {{Introduction to Classic ML}},
+ type = "technical report",
+ institution = "Cornell University",
+ year = "2011",
+ paper = "Krei11.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Kripke, Saul A.}
+\begin{chunk}{axiom.bib}
+@book{Krip80,
+ author = "Kripke, Saul A.",
+ title = {{Naming and Necessity}},
+ publisher = "Harvard University Press",
+ year = "1980",
+ isbn = "0674598466"
+}
+
+\end{chunk}
+
\subsection{L} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Lee, Daniel K.}
+\index{Crary, Karl}
+\index{Harper, Robert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Leex07,
+ author = "Lee, Daniel K. and Crary, Karl and Harper, Robert",
+ title = {{Towards a Mechanized Metatheory of Standard ML}},
+ booktitle = "POPL'07",
+ publisher = "ACM",
+ year = "2007",
+ abstract =
+ "We present an internal language with equivalent expressive power to
+ Standard ML, and discuss its formalization in LF and the
+ machine-checked verification of its type safety in Twelf. The
+ internal language is intended to serve as the target of elaboration in
+ an elaborative semantics for Standard ML in the style of Harper and
+ Stone. Therefore, it includes all the programming mechanisms
+ necessary to implement Standard ML, including translucent modules,
+ abstraction, polymorphism, higher kinds, references, exceptions,
+ recursive types, and recursive functions. Our successful formalization
+ of the proof involved a careful interplay between the precise
+ formulations of the various mechanisms, and required the invention of
+ new representation and proof techniques of general interest.",
+ paper = "Leex07.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{M} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Ma, KwanLiu}
+\index{Kessler, Robert R.}
+\begin{chunk}{axiom.bib}
+@article{Maxx90,
+ author = "Ma, KwanLiu and Kessler, Robert R.",
+ title = {{TICL - A Type Inference System for Common Lisp}},
+ journal = "Software - Practice and Experience",
+ volume = "20",
+ number = "6",
+ pages = "593-623",
+ year = "1990",
+ abstract =
+ "Most current Common Lisp compilers generate more efficient code
+ when supplied with data type information. However, in keeping with
+ standard Lisp programming style, most programmers are reluctant to
+ provide type information; they simply allow the runtime type
+ system to manage the data types accordingly. To fill this gap, we
+ have designed and implemented a type inference system for Common
+ Lisp (TICL). TICL takes a Lisp program that has been annotated
+ with a few type declarations, adds as many declarations as
+ possible, and produces a type-declared program. The compiler can
+ then use this information to generate more efficient
+ code. Measurements indicate that a 20 per cent speed improvement
+ can generally be achieved.",
+ paper = "Maxx90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{McKenna, Brian}
+\begin{chunk}{axiom.bib}
+@misc{Mcke14,
+ author = "McKenna, Brian",
+ title = {{Idris: Practical Dependent Types with Practical Examples}},
+ year = "2014",
+ link = "\url{https://www.youtube.com/watch?v=4i7KrG1Afbk}",
+ abstract =
+ "Dependent types turn types into a firstclass language construct and
+ allows types to be predicated upon values. Allowing types to be
+ controlled by ordinary values allows us to prove many more properties
+ about our programs and enables some interesting forms of
+ metaprogramming. We can do interesting things like write a type-safe
+ printf or verify algebraic laws of data structures - but while
+ dependent types are quickly gaining in popularity, it can be tricky to
+ find examples which are both useful and introductory.
+
+ This talk will demonstrate some practical dependent typing examples
+ (and not sized vectors) using Idris, a language designed for writing
+ executable programs, rather than just proving theorems."
+}
+
+\end{chunk}
+
\index{McCarthy, John}
\index{Abrahams, Paul W.}
\index{Edwards, Daniel J.}
@@ 12839,8 +14270,170 @@ when shown in factored form.
\end{chunk}
+\index{Mills, Bruce}
+\begin{chunk}{axiom.bib}
+@book{Mill06,
+ author = "Mills, Bruce",
+ title = {{Theoretical Introduction to Programming}},
+ publisher = "Springer",
+ year = "2006",
+ isbn = "1846280214"
+}
+
+\end{chunk}
+
+\index{Mobarakeh, S. Saeidi}
+\begin{chunk}{axiom.bib}
+@misc{Moba09,
+ author = "Mobarakeh, S. Saeidi",
+ title = {{Type Inference Algorithms}},
+ year = "2009",
+ link = "\url{https://www.win.tue.nl/~hzantema/semssm.pdf}",
+ abstract =
+ "n this paper we are going to describe the Wand’s type inference
+ algorithm and we’ll try to extend this algorithm with the notion of
+ polymorphic let. By means of a type system, which we’re going to
+ extend with some constraint language, we are able to extend the
+ algorithm’s first phase (generation of equations) with
+ letpolymorphism. The second phase of the algorithm (solving of the
+ generated equations) needs some modifications to be done on standard
+ unification algorithms because the new generated equation constructs
+ can not directly be fed in to the standard unification algorithm. The
+ correctness of the first phase of the algorithm is been proved by
+ extending the Wand’s soundness and completeness prove. Finally a
+ simple example is given to clarify the idea behind the algorithm.",
+ paper = "Moba09.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
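+Since the equation-solving phase reduces to first-order unification,
+here is a minimal unifier (our sketch, without the let-polymorphism
+extension discussed in the paper); type variables are lowercase
+strings and type constructors are tuples:
+\begin{verbatim}
+def is_var(t):
+    return isinstance(t, str) and t[:1].islower()
+
+def walk(t, s):
+    # follow bindings in the substitution s
+    while is_var(t) and t in s:
+        t = s[t]
+    return t
+
+def occurs(v, t, s):
+    # occurs check: reject infinite types such as a = ('->', a, a)
+    t = walk(t, s)
+    return t == v or (isinstance(t, tuple)
+                      and any(occurs(v, x, s) for x in t))
+
+def unify(a, b, s=None):
+    # solve the type equation a = b; return a substitution or None
+    s = {} if s is None else s
+    a, b = walk(a, s), walk(b, s)
+    if a == b:
+        return s
+    if is_var(a):
+        return None if occurs(a, b, s) else {**s, a: b}
+    if is_var(b):
+        return unify(b, a, s)
+    if isinstance(a, tuple) and isinstance(b, tuple) and len(a) == len(b):
+        for x, y in zip(a, b):
+            s = unify(x, y, s)
+            if s is None:
+                return None
+        return s
+    return None
+
+print(unify(('->', 'a', 'Int'), ('->', 'Bool', 'b')))
+# {'a': 'Bool', 'b': 'Int'}
+\end{verbatim}
+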
+\index{Moschovakis, Y.N.}
+\begin{chunk}{axiom.bib}
+@article{Mosc84,
+ author = "Moschovakis, Y.N.",
+ title = {{Abstract Recursion as a Foundation of the Theory of Algorithms}},
+ journal = "LNCS",
+ volume = "1104",
+ pages = "289364",
+ publisher = "Springer",
+ year = "1984",
+ abstract =
+ "The main object of this paper is to describe an abstract,
+ (axiomatic) theory of recursion and its connection with some of
+ the basic, foundational questions of computer science.",
+ paper = "Mosc84.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Moy, Yannick}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Moyx10,
+ author = "Moy, Yannick and Wallenburg, Angela",
+ title = {{Tokeneer: Beyond Formal Program Verification}},
+ year = "2010",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/ERTS2010.pdf}",
+ abstract =
+ "Tokeneer is a smallsized (10 kloc) security system which was
+ formally developed and verified by Praxis at the request of NSA, using
+ SPARK technology. Since its opensource release in 2008, only two
+ problems were found, one by static analysis, one by code review. In
+ this paper, we report on experiments where we systematically applied
+ various static analysis tools (compiler, bugfinder, proof tools)
+ and focused code reviews to all of the SPARK code (formally verified)
+ and supporting Ada code (not formally verified) of the Tokeneer
+ Project. We found 20 new problems overall, half of which are defects
+ that could lead to a system failure should the system be used in its
+ current state. Only two defects were found in SPARK code, which
+ confirms the benefits of applying formal verification to reach
+ higher levels of assurance. In order to leverage these benefits to
+ code that is was not formally verified from the start, we propose to
+ associate static analyses and dynamic analyses around a common
+ expression of properties and constraints. This is the goal of starting
+ project HiLite, which involves AdaCore and Altran Praxis together
+ with several industrial users and research labs.",
+ paper = "Moyx10.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Murray, Toby}
+\index{van Oorschot, P.C.}
+\begin{chunk}{axiom.bib}
+@misc{Murr18,
+ author = "Murray, Toby and van Oorcchot, P.C.",
+ title = {{BP: Formal Proofs, the Fine Print and Side Effects}},
+ link = "\url{https://people.eng.unimelb.edu.au/tobym/papers/secdev2018.pdf}",
+ comment = "Version: 26 June 2018 to appear in IEEE SecDev 2018",
+ abstract =
+ "Given recent highprofile successes in formal verification of
+ securityrelated properties (e.g. for seL4), and the rising
+ popularity of applying formal methods to cryptographic libraries
+ and security protocols like TLS, we revisit the meaning of
+ securityrelated proofs about software. We reexamine old issues,
+ and identify new questions that have escaped scrutiny in the
+ formal methods literature. We consider what value proofs about
+ software systems deliver to endusers (e.g. in terms of net
+ assurance benefits), and at what cost in terms of side effects
+ (such as changes made to software to facilitate the proofs, and
+ assumptionrelated deployment restrictions imposed on software if
+ these proofs are to remain valid in operation). We consider in
+ detail, for the first time to our knowledge, possible
+ relationships between proofs and side effects. To make our
+ discussion concrete, we draw on tangible examples, experience, and
+ the literature.",
+ paper = "Murr18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{N} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Nimmer, Jeremy W.}
+\index{Ernst, Michael D.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Nimm02,
+ author = "Nimmer, Jeremy W. and Ernst, Michael D.",
+ title = {{Automatic Generation of Program Specifications}},
+ booktitle = "Symp. on Software Testing and Analysis",
+ publisher = "ACM Press",
+ year = "2002",
+ abstract =
+ "Producing specifications by dynamic (runtime) analysis of program
+ executions is potentially unsound, because the analyzed executions may
+ not fully characterize all possible executions of the program. In
+ practice, how accurate are the results of a dynamic analysis? This
+ paper describes the results of an investigation into this question,
+ determining how much specifications generalized from program runs
+ must be changed in order to be verified by a static checker.
+ Surprisingly, small test suites captured nearly all program behavior
+ required by a specific type of static checking; the static checker
+ guaranteed that the implementations satisfy the generated
+ specifications, and ensured the absence of runtime exceptions. Measured
+ against this verification task, the generated specifications scored
+ over 90 percent on precision, a measure of soundness, and on recall, a
+ measure of completeness.
+
+ This is a positive result for testing, because it suggests that
+ dynamic analyses can capture all semantic information of interest for
+ certain applications. The experimental results demonstrate that a
+ specific technique, dynamic invariant detection, is effective at
+ generating consistent, sufficient specifications for use by a static
+ checker. Finally, the research shows that combining static and
+ dynamic analyses over program specifications has benefits for users of
+ each technique, guaranteeing soundness of the dynamic analysis and
+ lessening the annotation burden for users of the static analysis.",
+ paper = "Nimm02.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
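+A toy sketch of the underlying dynamic-invariant idea (ours; the
+paper itself uses the Daikon invariant detector with the ESC/Java
+static checker): propose candidate invariants and keep those that no
+observed run falsifies:
+\begin{verbatim}
+def detect_invariants(runs):
+    # each run is a snapshot of variable values at a program point
+    candidates = {
+        "x >= 0":    lambda v: v["x"] >= 0,
+        "x <= y":    lambda v: v["x"] <= v["y"],
+        "y == 2*x":  lambda v: v["y"] == 2 * v["x"],
+    }
+    return [name for name, check in candidates.items()
+            if all(check(v) for v in runs)]
+
+runs = [{"x": 1, "y": 2}, {"x": 3, "y": 6}]
+print(detect_invariants(runs))  # ['x >= 0', 'x <= y', 'y == 2*x']
+\end{verbatim}
+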
\index{Nipkow, Tobias}
\index{Tabacznyj, Christophe}
\index{Paulson, Lawrence C.}
@@ -12860,11 +14453,223 @@ when shown in factored form.
\end{chunk}
\subsection{O} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Olsson, Ola}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Olss04,
+ author = "Olsson, Ola and Wallenburg, Angela",
+ title = {{Automatic Generation of Customised Induction Rules for
+ Proving Correctness of Imperative Programs}},
+ year = "2004",
+ comment = "paper follows thesis in file",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ abstract =
+ "In this paper we develop a method for automatic construction of
+ customised induction rules for use in a semi-interactive theorem
+ prover. The induction rules are developed to prove the total
+ correctness of loops in an object-oriented language. We concentrate
+ on integers. First we compute a partition of the domain of the
+ induction variable. Our method makes use of failed proof attempts in
+ the theorem prover to gain information about the problem structure and
+ create the partition. Then, based on this partition we create an
+ induction rule, in destructor style, that is customised to make the
+ proving of the loop simpler. Our concern is in user interaction,
+ rather than in proof strength.
+
+ Using the customised induction rules, some separation of proof of
+ control flow and data correctness is achieved, and we find that in
+ comparison to standard (Peano) induction or Noetherian induction,
+ simpler user interaction can be expected. Furthermore, by using
+ destructor style induction we circumvent the problem of creating
+ inverses of functions. We show the soundness of the customised
+ induction rules created by the method. Furthermore, we use the
+ machinery of the theorem prover (KeY) to make the method automatic.
+ Several interesting areas are also identified that could open up
+ a larger range of loops the method can handle, as well as pointing
+ towards full automation of these cases.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Olsson, Ola}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Olss05,
+ author = "Olsson, Ola and Wallenburg, Angela",
+ title = {{Customised Induction Rules for Proving Correctness of
+ Imperative Programs}},
+ year = "2005",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/olswbg05.pdf}",
+ abstract =
+ "In this paper we develop a method for automatic construction of
+ customised induction rules for use in a semi-interactive theorem
+ prover. The induction rules are developed to prove the total
+ correctness of loops in an object-oriented language. We concentrate
+ on integers. First we compute a partition of the domain of the
+ induction variable. Our method makes use of failed proof attempts in
+ the theorem prover to gain information about the problem structure and
+ create the partition. Then, based on this partition we create an
+ induction rule, in destructor style, that is customised to make the
+ proving of the loop simpler. Our concern is in user interaction,
+ rather than in proof strength.
+
+ Using the customised induction rules, some separation of proof of
+ control flow and data correctness is achieved, and we find that in
+ comparison to standard (Peano) induction or Noetherian induction,
+ simpler user interaction can be expected. Furthermore, by using
+ destructor style induction we circumvent the problem of creating
+ inverses of functions. We show the soundness of the customised
+ induction rules created by the method. Furthermore, we use the
+ machinery of the theorem prover (KeY) to make the method automatic.
+ Several interesting areas are also identified that could open up
+ a larger range of loops the method can handle, as well as pointing
+ towards full automation of these cases.",
+ paper = "Olss05.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{P} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Pittman, Dan}
+\begin{chunk}{axiom.bib}
+@misc{Pitt18,
+ author = "Pittman, Dan",
+ title = {{Proof Theory Impressionism: Blurring the Curry-Howard Line}},
+ link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}",
+ year = "2018",
+ comment = "Strange Loop 2018 Conference"
+}
+
+\end{chunk}
+
+\index{Platzer, Andre}
+\begin{chunk}{axiom.bib}
+@book{Plat18,
+ author = "Platzer, Andre",
+ title = {{Logical Foundations of Cyber-Physical Systems}},
+ publisher = "Springer",
+ year = "2018",
+ isbn = "9783319635873"
+}
+
+\end{chunk}
+
+\index{Polikarpova, Nadia}
+\begin{chunk}{axiom.bib}
+@misc{Poli18,
+ author = "Polikarpova, Nadia",
+ title = {{Type-Driven Program Synthesis}},
+ link = "\url{https://www.youtube.com/watch?v=HnOix9TFy1A}",
+ year = "2018",
+ abstract =
+ "A promising approach to improving software quality is to enhance
+ programming languages with declarative constructs, such as logical
+ specifications or examples of desired behavior, and to use program
+ synthesis technology to translate these constructs into efficiently
+ executable code. In order to produce code that provably satisfies a
+ rich specification, the synthesizer needs an advanced program
+ verification mechanism that is sufficiently expressive and fully
+ automatic. In this talk, I will present a program synthesis technique
+ based on refinement type checking: a verification mechanism that
+ supports automatic reasoning about expressive properties through a
+ combination of types and SMT solving.
+
+ The talk will present two applications of type-driven synthesis. The
+ first one is a tool called Synquid, which creates recursive functional
+ programs from scratch given a refinement type as input. Synquid is the
+ first synthesizer powerful enough to automatically discover programs
+ that manipulate complex data structures, such as balanced trees and
+ propositional formulas. The second application is a language called
+ Lifty, which uses type-driven synthesis to repair information flow
+ leaks. In Lifty, the programmer specifies expressive information flow
+ policies by annotating the sources of sensitive data with refinement
+ types, and the compiler automatically inserts access checks necessary
+ to enforce these policies across the code."
+}
+
+\end{chunk}
+
+\index{Post, Emil L.}
+\begin{chunk}{axiom.bib}
+@article{Post44,
+ author = "Post, Emil L.",
+ title = {{Recursively Enumerable Sets of Positive Integers and Their
+ Decision Problems}},
+ journal = "Bull. Amer. Math. Soc.",
+ volume = "50",
+ pages = "284-316",
+ year = "1944",
+ paper = "Post44.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{Q} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{R} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Rees, Jonathan}
+\index{Adams, Norman I.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Rees82,
+ author = "Rees, Jonathan and Adams, Norman I",
+ title = {{T: A Dialect of Lisp or, LAMBDA: The Ultimate Software Tool}},
+ booktitle = "Sym. on Lisp and Functional Programming",
+ pages = "114122",
+ year = "1982",
+ abstract =
+ "The T proejct is an experiment in language design and
+ implementation. Its purpose is to test the thesis developed by
+ Steele and Sussman in their series of papers about the Scheme
+ language: that Scheme may be used as the basis for a practical
+ programming language of exceptional expressive power, and that
+ implementations of Scheme could perform better than other Lisp
+ systems, and competitively with implementations of programming
+ languages, such as C and Bliss, which are usually considered to be
+ inherently more efficient than Lisp on conventional machine
+ architectures. We are developing a portable implementation of T,
+ currently targeted for the VAX under the Unix and VMS operating
+ systems and for the Apollo, a MC68000-based workstation.",
+ paper = "Rees82.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Remy, Didier}
+\begin{chunk}{axiom.bib}
+@techreport{Remy92,
+ author = "Remy, Didier",
+ title = {{Extensions to ML type system with a sorted equation theory
+ on types}},
+ type = "research report",
+ institution = "INRIA",
+ number = "RR1766",
+ year = "1992",
+ abstract =
+ "We extend the ML language by allowing a sorted regular equational
+ theory on types for which unification is decidable and unitary. We
+ prove that the extension keeps principlal typings and subject
+ reduction. A new set of typing rules is proposed so that type
+ generalization is simpler and more efficient. We consider typing
+ problems as general unification problems, which we solve with a
+ formalism of unificands. Unificands naturally deal with sharing
+ between types and lead to a more efficient type inference
+ algorithm. The use of unificands also simplifies the proof of
+ correctness of the algorithm by splitting it into more elementary
+ steps.",
+ paper = "Remy92.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Reynolds, John C.}
\begin{chunk}{axiom.bib}
@misc{Reyn94,
@@ 12877,8 +14682,304 @@ when shown in factored form.
\end{chunk}
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inbook{Reyn97,
+ author = "Reynolds, John C.",
+ title = {{The Essence of Algol}},
+ booktitle = "ALGOLlike languages, Volume 1",
+ publisher = "Birkhauser Boston Inc.",
+ chapter = "1",
+ pages = "6788",
+ isbn = "0817638806",
+ year = "1997",
+ abstract =
+ "Although Algol 60 has been uniquely influential in programming
+ language design, its descendants have been significantly different than
+ their prototype. In this paper, we enumerate the principles that we
+ believe embody the essence of Algol, describe a model that satisfies
+ these principles, and illustrate this model with a language that,
+ while more uniform and general, retains the character of Algol.",
+ paper = "Reyn97.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Rossberg, Andreas}
+\begin{chunk}{axiom.bib}
+@misc{Ross13,
+ author = "Rossberg, Andreas",
+ title = {{HaMLet: To Be Or Not To Be Standard ML}},
+ year = "2013",
+ link = "\url{https://people.mpisws.org/~rossberg/hamlet}",
+ paper = "Ross13.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{S} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Sannella, Donald}
+\begin{chunk}{axiom.bib}
+@article{Sann86,
+ author = "Sannella, Donald",
+ title = {{Extended ML: An Institution-Independent Framework for
+ Formal Program Development}},
+ booktitle = "Proc. Workshop on Category Theory and Computer Programming",
+ journal = "LNCS",
+ volume = "240",
+ pages = "364389",
+ year = "1986",
+ paper = "Sann86.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Sann87,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{On Observational Equivalence and Algebraic Specification}},
+ journal = "J. of Computer and System Sciences",
+ volume = "34",
+ pages = "150178",
+ year = "1987",
+ abstract =
+ "The properties of a simple and natural notion of observational
+ equivalence of algebras and the corresponding specification-building
+ operation are studied. We begin with a definition of observational
+ equivalence which is adequate to handle reachable algebras only, and
+ show how to extend it to cope with unreachable algebras and also how
+ it may be generalised to make sense under an arbitrary institution.
+ Behavioural equivalence is treated as an important special case of
+ observational equivalence, and its central role in program development
+ is shown by means of an example.",
+ paper = "Sann87.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Sann88,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{Specifications in an Arbitrary Institution}},
+ journal = "Information and Computation",
+ volume = "76",
+ pages = "165210",
+ year = "1988",
+ abstract =
+ "A formalism for constructing and using axiomatic specifications in an
+ arbitrary logical system is presented. This builds on the framework
+ provided by Goguen and Burstall’s work on the notion of an institution
+ as a formalisation of the concept of a logical system for writing
+ specifications. We show how to introduce free variables into the
+ sentences of an arbitrary institution and how to add quantifiers which
+ bind them. We use this foundation to define a set of primitive
+ operations for building specifications in an arbitrary institution
+ based loosely on those in the ASL kernel specification language. We
+ examine the set of operations which results when the definitions are
+ instantiated in institutions of total and partial first-order logic
+ and compare these with the operations found in existing specification
+ languages. We present proof rules which allow proofs to be conducted
+ in specifications built using the operations we define. Finally, we
+ introduce a simple mechanism for defining and applying parameterised
+ specifications and briefly discuss the program development process.",
+ paper = "Sann88.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@inproceedings{Sann89,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{Toward Formal Development of ML Programs: Foundations and
+ Methodology (Extended Abstract)}},
+ booktitle = "Int. Conf. on Theory and Practice of Software Development",
+ pages = "375389",
+ publisher = "Springer",
+ year = "1989",
+ abstract =
+ "A methodology is presented for the formal development of modular
+ Standard ML programs from specifications. Program development proceeds
+ via a sequence of design (modular decomposition), coding and
+ refinement steps. For each of these three kinds of step, conditions
+ are given which ensure the correctness of the result. These conditions
+ seem to be as weak as possible under the constraint of being
+ expressible as local interface matching requirements.",
+ paper = "Sann89.pdf"
+}
+
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@article{SCSCP10,
+ author = "Unknown",
+ title = {{Symbolic Computation Software Composability Project and
+ its implementations}},
+ journal = "ACM Comm. in Computer Algebra",
+ volume = "44",
+ number = "4",
+ year = "2010",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Shivers, Olin}
+\begin{chunk}{axiom.bib}
+@techreport{Shiv90,
+ author = "Shivers, Olin",
+ title = {{Data-Flow Analysis and Type Recovery in Scheme}},
+ year = "1990",
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMU-CS-90-115",
+ abstract =
+ "The lack of explicit type information in Scheme prevents the
+ application of many compiler optimizations. Implicit type
+ information can oftentimes be recovered by analyzing the flow of
+ control through primitive operations and conditionals. Such flow
+ analysis, however, is difficult in the presence of higher-order
+ functions. This paper presents a technique for recovering type
+ information from Scheme programs using flow analysis. The
+ algorithm used for flow analysis is semantically based, and is
+ novel in that it correctly handles the ``environment flow''
+ problem. Several examples of a working implementation of the
+ analysis are presented. The techniques are applicable not only to
+ Scheme type recovery, but also to related languages, such as
+ Common Lisp and Standard ML, and to related data-flow analyses,
+ such as range analysis, future analysis, and copy propagation.",
+ paper = "Shiv90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Soare, Robert I.}
+\begin{chunk}{axiom.bib}
+@article{Soar96,
+ author = "Soare, Robert I.",
+ title = {{Computability and Recursion}},
+ journal = "Bulletin of Symbolic Logic",
+ volume = "2",
+ year = "1996",
+ pages = "284321",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/compute.pdf}",
+ abstract =
+ "We consider the informal concept of 'computability' or 'effective
+ calculability' and two of the formalisms commonly used to define
+ it, '(Turing) computability' and '(general) recursiveness'. We
+ consider their origin, exact technical definition, concepts,
+ history, general English meanings, how they became fixed in their
+ present roles, how they were first and are now used, their impact
+ on non-specialists, how their use will affect the future content of
+ the subject of computability theory, and its connection to other
+ related areas.
+
+ After a careful historical and conceptual analysis of
+ computability and recursion we make several recommendations in
+ section 7 about preserving the {\sl intensional} differences
+ between the concepts of 'computability' and
+ 'recursion'. Specifically we recommend that: the term 'recursive'
+ should no longer carry the additional meaning of 'computable' or
+ 'decidable', functions defined using Turing machines, register
+ machines, or their variants should be called 'computable' rather
+ than 'recursive', we should distinguish the intensional difference
+ between Church's Thesis and Turing's Thesis, and use the latter
+ particularly in dealing with mechanistic questions; the name of
+ the subject should be '{\sl Computability Theory}' or simply
+ {\sl Computability} rather than 'Recursive Function Theory'.",
+ paper = "Soar96.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Soare, Robert I.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Soar07,
+ author = "Soare, Robert I.",
+ title = {{Computability and Incomputability}},
+ booktitle = "Proc. Third Conf. on Computability in Europe",
+ year = "2007",
+ publisher = "SpringerVerlog",
+ isbn = "139783540730002",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/siena.pdf}",
+ abstract =
+ "The conventional wisdom presented in most computability books and
+ historical papers is that there were several researchers in the early
+ 1930’s working on various precise definitions and demonstrations of a
+ function specified by a finite procedure and that they should all
+ share approximately equal credit. This is incorrect. It was Turing
+ alone who achieved the characterization, in the opinion of Gödel. We
+ also explore Turing’s oracle machine and its analogous properties in
+ analysis.",
+ paper = "Soar07.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Soare, Robert I.}
+\begin{chunk}{axiom.bib}
+@incollection{Soar99,
+ author = "Soare, Robert I.",
+ title = {{The History and Concept of Computability}},
+ booktitle = "Handbook of Computability Theory",
+ publisher = "North-Holland",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/handbook.pdf}",
+ year = "1999",
+ paper = "Soar99.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Steenkiste, Peter}
+\index{Hennessy, John}
+\begin{chunk}{axiom.bib}
+@article{Stee87,
+ author = "Steenkiste, Peter and Hennessy, John",
+ title = {{Tags and Type Checking in LISP: Hardware and Software Approaches}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "22",
+ number = "10",
+ pages = "5059",
+ year = "1987",
+ abstract =
+ "One of the major factors that distinguishes LISP from many other
+ languages (Pascal, C, Fortran, etc.) is the need for runtime type
+ checking. Runtime type checking is implemented by adding to each
+ data object a tag that encodes type information. Tags must be
+ compared for type compatibility, removed when using the data, and
+ inserted when new data items are created. This tag manipulation,
+ together with other work related to dynamic type checking and
+ generic operations, constitutes a significant component of the
+ execution time of LISP programs. This has led both to the
+ development of LISP machines that support tag checking in hardware
+ and to the avoidance of type checking by users running on stock
+ hardware. To understand the role and necessity of special-purpose
+ hardware for tag handling, we first measure the cost of type
+ checking operations for a group of LISP programs. We then examine
+ hardware and software implementations of tag operations and
+ estimate the cost of tag handling with the different tag
+ implementation schemes. The data shows that minimal levels of
+ support provide most of the benefits, and that tag operations can
+ be relatively inexpensive, even when no special hardware support
+ is present.",
+ paper = "Stee87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
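+A crude model of the overhead being measured (our sketch, not from
+the paper): every generic operation must check, strip, and re-attach
+tags:
+\begin{verbatim}
+def make(tag, value):            # attach a runtime type tag
+    return (tag, value)
+
+def generic_add(a, b):
+    ta, va = a                   # untag
+    tb, vb = b
+    if ta == "fixnum" and tb == "fixnum":   # tag check
+        return make("fixnum", va + vb)      # add, retag
+    raise TypeError("wrong-type-argument")
+
+print(generic_add(make("fixnum", 1), make("fixnum", 2)))
+# ('fixnum', 3)
+\end{verbatim}
+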
\subsection{T} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{BreazuTannen, Val}
@@ -12925,6 +15026,38 @@ when shown in factored form.
\end{chunk}
+\index{Tarditi, David}
+\index{Morrisett, Greg}
+\index{Cheng, Perry}
+\index{Stone, Chris}
+\index{Harper, Robert}
+\index{Lee, Peter}
+\begin{chunk}{axiom.bib}
+@inproceedings{Tard03,
+ author = "Tarditi, David and Morrisett, Greg and Cheng, Perry and
+ Stone, Chris and Harper, Robert and Lee, Peter",
+ title = {{TIL: A Type-Directed, Optimizing Compiler for ML}},
+ booktitle = "20 Years of Prog. Lang. Design and Implementation",
+ year = "2003",
+ publisher = "ACM",
+ isbn = "1581136234",
+ paper = "Tard03.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Torlak, Emina}
+\begin{chunk}{axiom.bib}
+@misc{Torl17,
+ author = "Torlak, Emina",
+ title = {{Synthesis and Verification for All}},
+ link = "\url{https://www.youtube.com/watch?v=KpDyuMIb_E0}",
+ year = "2017"
+}
+
+\end{chunk}
+
\subsection{U} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{V} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -12965,8 +15098,176 @@ when shown in factored form.
\end{chunk}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@phdthesis{Wall04,
+ author = "Wallenburg, Angela",
+ title = {{Inductive Rules for Proving Correctness of Imperative Programs}},
+ school = "Goteborg University",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ year = "2004",
+ abstract =
+ "This thesis is aimed at simplifying the userinteraction in
+ semiinteractive theorem proving for imperative programs. More
+ specically , we describe the creation of customised induction rules
+ that are tailormade for the specific program to verify and thus make
+ the resulting proof simpler. The concern is in user interaction,
+ rather than in proof strength. To achiev e this, two different
+ verication techniques are used.
+
+ In the first approach, we develop an idea where a software testing
+ technique, partition analysis, is used to compute a partition of the
+ domain of the induction variable, based on the branch predicates in
+ the program we wish to prove correct. Based on this partition we
+ derive mechanically a partitioned induction rule, which then inherits
+ the divide-and-conquer style of partition analysis, and (hopefully) is
+ easier to use than the standard (Peano) induction rule.
+
+ The second part of the thesis continues with a more thorough
+ development of the method. Here the connection to software testing is
+ completely removed and the focus is on inductive theorem proving only.
+ This time, we make use of failed proof attempts in a theorem prover
+ to gain information about the problem structure and create the
+ partition. Then, based on the partition we create an induction rule,
+ in destructor style, that is customised to make the proving of the
+ loop simpler.
+
+ With the customised induction rules, in comparison to standard (Peano)
+ induction or Noetherian induction, the required user interaction is
+ moved to an earlier point in the proof which also becomes more
+ modularised. Moreover, by using destructor style induction we
+ circumvent the problem of creating inverses of functions. The
+ soundness of the customised induction rules created by the method is
+ shown. Furthermore, the machinery of the theorem prover (KeY) is used
+ to make the method automatic. The induction rules are developed to
+ prove the total correctness of loops in an object-oriented language
+ and we concentrate on integers.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Wall09,
+ author = "Wallenburg, Angela",
+ title = {{Generalisation of Inductive Formulae based on Proving by
+ Symbolic Execution}},
+ year = "2009",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/awWING2009.pdf}",
+ abstract =
+ "Induction is a powerful method that can be used to prove the total
+ correctness of program loops. Unfortunately the induction proving
+ process in an interactive theorem prover is often very cumbersome. In
+ particular it can be difficult to find the right induction formula.
+ We describe a method for generalising induction formulae by analysing
+ a symbolic proof attempt in a semi-interactive first-order theorem
+ prover. Based on the proof attempt we introduce universally
+ quantified variables, metavariables and sets of constraints on these.
+ The constraints describe the conditions for a successful proof. With
+ the help of examples, we outline some classes of problems and their
+ associated constraint solutions, and possible ways to automate the
+ constraint solving.",
+ paper = "Wall09.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{van der Walt, Paul}
+\begin{chunk}{axiom.bib}
+@mastersthesis{Walt12,
+ author = "van der Walt, Paul",
+ title = {{Reflection in Agda}},
+ school = "Utrecht University",
+ year = "2012",
+ abstract =
+ "This project explores the recent addition to Agda enabling
+ reflection, in the style of Lisp, MetaML, and Template Haskell. It
+ illustrates several possible applications of reflection that arise in
+ dependently typed programming, and details the limitations of the
+ current implementation of reflection. Examples of type-safe
+ metaprograms are given that illustrate the power of reflection coupled
+ with a dependently typed language. Among other things the limitations
+ inherent in having quote and unquote implemented as keywords are
+ highlighted. The fact that lambda terms are returned without typing
+ information is discussed, and a solution is presented. Also provided
+ is a detailed users’ guide to the reflection API and a library of
+ working code examples to illustrate how various common tasks can be
+ performed, along with suggestions for an updated reflection API in a
+ future version of Agda.",
+ paper = "Walk12.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Weirich, Stephanie}
+\begin{chunk}{axiom.bib}
+@misc{Weir18,
+ author = "Weirich, Stephanie",
+ title = {{Dependent Types in Haskell}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=wNa3MMbhwS4}",
+ abstract =
+ "What has dependent type theory done for Haskell? Over the past ten
+ years, the Glasgow Haskell compiler (GHC) has adopted many type system
+ features inspired by dependent type theory. In this talk, I will
+ discuss the influence of dependent types on the design of GHC and on
+ the practice of Haskell programmers. In particular, I will walk
+ through an extended example and use it to analyze what it means to
+ program with dependent types in Haskell. Throughout, I will
+ discuss what we have learned from this experiment in language design:
+ what works now, what doesn't work yet, and what surprised us along
+ the way."
+}
+
+\end{chunk}
+
+\index{Winkler, Franz}
+\begin{chunk}{axiom.bib}
+@book{Wink84,
+ author = "Winkler, Franz",
+ title = {{The Church-Rosser Property in Computer Algebra and Special
+ Theorem Proving}},
+ publisher = "Wien",
+ year = "1984",
+ isbn = "3853695841"
+}
+
+\end{chunk}
+
+\index{Wirth, Niklaus}
+\begin{chunk}{axiom.bib}
+@misc{Wirt15,
+ author = "Wirth, Niklaus",
+ title = {{The Design of a RISC Architecture and its Implementation
+ with an FPGA}},
+ link =
+ "\url{https://www.inf.ethz.ch/personal/wirth/FPGArelatedWork/RISC.pdf}",
+ year = "2015",
+ paper = "Wirt15.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{X} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Y} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Yoshida, Masaaki}
+\begin{chunk}{axiom.bib}
+@book{Yosh97,
+ author = "Yoshida, Masaaki",
+ title = {{Hypergeometric Functions, My Love}},
+ publisher = "Springer",
+ year = "1997",
+ isbn = "9783322901682"
+}
+
+\end{chunk}
+
\subsection{Z} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Proving Axiom Correct  Spring 2018}
@@ -13523,7 +15824,7 @@ when shown in factored form.
\index{Atkey, Robert}
\begin{chunk}{axiom.bib}
-@misc{Atke18,
+@misc{Atke18a,
author = "Atkey, Robert",
title = {{Parameterised Notions of Computation}},
link = "\url{https://bentnib.org/paramnotionsjfp.pdf}",
@@ -13545,7 +15846,7 @@ when shown in factored form.
soundly and completely model our categorical definitions — with and
without symmetric monoidal parameterisation — and act as prototypical
languages with parameterised effects.",
 paper = "Atke18.pdf",
+ paper = "Atke18a.pdf",
keywords = "printed"
}
@@ -13607,7 +15908,8 @@ when shown in factored form.
author = "Bailey, David H. and Borwein, Jonathan M.",
title = {{Highprecision Numerical Integration: Progress and Challenges}},
journal = "J. Symbolic Computation",
 number = "46",
+ volume = "46",
+ number = "7",
pages = "741754",
year = "2011",
abstract =
@@ -16415,6 +18717,7 @@ when shown in factored form.
author = "Hamada, Tatsuyoshi",
title = {{MathLibre: Personalizable Computer Environment for
Mathematical Research}},
+ journal = "ACM Communications in Computer Algebra",
volume = "48",
number = "3",
year = "2014",
@@ -17042,7 +19345,7 @@ when shown in factored form.
\index{Knuth, Donald E.}
\index{Bendix, Peter B.}
-\begin{chunk}
+\begin{chunk}{axiom.bib}
@incollection{Knut70,
author = "Knuth, Donald E. and Bendix, Peter B.",
title = {{Simple Word Problems in Unversal Algebras}},
@@ -18600,7 +20903,7 @@ when shown in factored form.
@book{Paul87,
author = "Paulson, Lawrence C.",
title = {{Logic and Computation}},
 publisher = "Press Synticate of Cambridge University",
+ publisher = "Press Syndicate of Cambridge University",
year = "1987",
isbn = "0521346320"
}
@@ -26127,11 +28430,11 @@ when shown in factored form.
\index{Bertot, Yves}
\index{Gonthier, Georges}
\begin{chunk}{axiom.bib}
@book{Mahb16,
+@book{Mahb18,
author = "Mahboubi, Assia and Tassi, Enrico and Bertot, Yves and
Gonthier, Georges",
title = {{Mathematical Components}},
 year = "2016",
+ year = "2018",
publisher = "mathcomp.github.io/mcb",
link = "\url{https://mathcomp.github.io/mcb/book.pdf}",
abstract =
@@ -26166,7 +28469,8 @@ when shown in factored form.
the first part, the reader has learnt how to prove formally the
 infinitude of prime numbers, or the correctness of the Euclidean
division algorithm, in a few lines of proof text.",
 paper = "Mahb16.pdf"
+ paper = "Mahb18.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ -34923,7 +37227,9 @@ Proc ISSAC 97 pp172175 (1997)
inference algorithm can often produce sharper bounds than
unificationbased type inference techniques. At the present time,
however, our treatment of higherorder data structures and functions
 is not as elegant as that of the unification techniques."
+ is not as elegant as that of the unification techniques.",
+ paper = "Bake90.html",
+ keywords = "printed"
}
\end{chunk}
@@ -61086,7 +63392,8 @@ Lecture Notes in Computer Science. 76 (1979) SpringerVerlag
type theory and is offered as being of interest on this basis
(whatever may be thought of the finally satisfactory character of the
theory of types as a foundation for logic and mathematics).",
 paper = "Chur40.pdf"
+ paper = "Chur40.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ -64117,6 +66424,20 @@ Springer Verlag Heidelberg, 1989, ISBN 0387969802
\end{chunk}
+\index{Kaufmann, Matt}
+\index{Manolios, Panagiotis}
+\index{Moore, J Strother}
+\begin{chunk}{axiom.bib}
+@book{Kauf00a,
+ author = "Kaufmann, Matt and Manolios, Panagiotis and Moore, J Strother",
+ title = {{ComputerAided Reasoning: ACL2 Case Studies}},
+ publisher = "Kluwer Academic Publishers",
+ year = "2000",
+ isbn = "0792378490"
+}
+
+\end{chunk}
+
\index{Khan, Muhammad Taimoor}
\index{Schreiner, Wolfgang}
\begin{chunk}{axiom.bib}
diff --git a/changelog b/changelog
index d1311c1..2942860 100644
--- a/changelog
+++ b/changelog
@@ -1,3 +1,6 @@
+20181211 tpd src/axiomwebsite/patches.html 20181211.01.tpd.patch
+20181211 tpd books/bookvolbib add references
+20181211 tpd readme add quote
20181003 tpd src/axiomwebsite/patches.html 20181003.01.tpd.patch
20181003 tpd src/input/kernel.input add additional Waldek examples
20180921 tpd src/axiomwebsite/patches.html 20180921.01.tpd.patch
@@ -9,7 +12,7 @@
20180717 tpd src/axiomwebsite/patches.html 20180717.01.tpd.patch
20180717 tpd src/axiomwebsite/documentation.html redirect downloads to github
20180714 tpd src/axiomwebsite/patches.html 20180714.01.tpd.patch
20180714 tpd src/axiomwebsite/currentstate.html
+20180714 tpd src/axiomwebsite/currentstate.html
20180703 tpd src/axiomwebsite/patches.html 20180703.03.tpd.patch
20180703 tpd books/bookheader.tex fix typo
20180703 tpd src/axiomwebsite/patches.html 20180703.02.tpd.patch
diff --git a/patch b/patch
index dc33b19..1e60341 100644
--- a/patch
+++ b/patch
@@ -2,529 +2,2204 @@ books/bookvolbib add references
Goal: Proving Axiom Sane
\index{Hearn, Peter W.O.}
+\index{Murray, Toby}
+\index{van Oorcchot, P.C.}
\begin{chunk}{axiom.bib}
@inproceedings{Hear18,
 author = "O'Hearn, Peter W.",
 title = {{Continuous Reasoning: Scaling the Impact of Formal Methods}},
 booktitle = "LICS 18",
+@misc{Murr18,
+ author = "Murray, Toby and van Oorcchot, P.C.",
+ title = {{BP: Formal Proofs, the Fine Print and Side Effects}},
+ link = "\url{https://people.eng.unimelb.edu.au/tobym/papers/secdev2018.pdf}",
+ comment = "Version: 26 June 2018 to appear in IEEE SecDev 2018",
+ abstract =
+ "Given recent highprofile successes in formal verification of
+ securityrelated properties (e.g. for seL4), and the rising
+ popularity of applying formal methods to cryptographic libraries
+ and security protocols like TLS, we revisit the meaning of
+ securityrelated proofs about software. We reexamine old issues,
+ and identify new questions that have escaped scrutiny in the
+ formal methods literature. We consider what value proofs about
+ software systems deliver to endusers (e.g. in terms of net
+ assurance benefits), and at what cost in terms of side effects
+ (such as changes made to software to facilitate the proofs, and
+ assumptionrelated deployment restrictions imposed on software if
+ these proofs are to remain valid in operation). We consider in
+ detail, for the first time to our knowledge, possible
+ relationships between proofs and side effects. To make our
+ discussion concrete, we draw on tangible examples, experience, and
+ the literature.",
+ paper = "Murr18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Filliatre, JeanChristophe}
+\begin{chunk}{axiom.bib}
+@misc{Fill13a,
+ author = "Filliatre, JeanChristophe",
+ title = {{Deductive Program Verification}},
+ year = "2013",
+ comment = "slides"
+}
+
+\end{chunk}
+
+\index{Fredrikson, Matt}
+\begin{chunk}{axiom.bib}
+@misc{Fred16,
+ author = "Fredrikson, Matt",
+ title = {{Automated Program Verification and Testing}},
+ comment = "slides",
+ year = "2016"
+}
+
+\end{chunk}
+
+\index{Clochard, Martin}
+\index{Gondelman, Leon}
+\index{Pereira, Mario}
+\begin{chunk}{axiom.bib}
+@article{Cloc18,
+ author = "Clochard, Martin and Gondelman, Leon and Pereira, Mario",
+ title = {{The Matrix Reproved}},
+ journal = "Journal of Automated Reasoning",
+ volume = "60",
+ number = "3",
+ pages = "365383",
year = "2018",
 publisher = "ACM",
 isbn = "9781450355834",
+ link = "\url{https://hal.inria.fr/hal01617437/document}",
abstract =
 "This paper describes work in continuous reasoning , where formal
 reasoning about a (changing) codebase is done in a fashion which
 mirrors the iterative, continuous model of software development that
 is increasingly practiced in indus try. We suggest that advances in
 continuous reasoning will allow formal reasoning to scale to more
 programs, and more programmers. The paper describes the rationale for
 contin uous reasoning, outlines some success cases from within
 industry, and proposes directions for work by the scientific
 community.",
 paper = "Hear18.pdf",
+ "In this paper we describe a complete solution for the
+ first challenge of the VerifyThis 2016 competition held at the 18th
+ ETAPS Forum. We present the proof of two variants for the
+ multiplication of matrices: a naive version using three nested loops
+ and Strassen's algorithm. The proofs are conducted using the Why3
+ platform for deductive program verification and automated theorem
+ provers to discharge proof obligations. In order to specify and
+ prove the two multiplication algorithms, we develop a new Why3
+ theory of matrices. In order to prove the matrix identities on which
+ Strassen's algorithm is based, we apply the proof by reflection
+ methodology, which we implement using ghost state. To our knowledge,
+ this is the first time such a methodology is used under an
+ autoactive setting.",
+ paper = "Cloc18.pdf",
keywords = "printed"
}
\end{chunk}
\index{Lester, David R.}
+\index{Balzer, Stephanie}
+\index{Pfenning, Frank}
+\index{Toninho, Bernardo}
\begin{chunk}{axiom.bib}
@phdthesis{Lest89,
 author = "Lester, David R.",
 title = {{Combinator Graph Reduction: A Congruence and its Applications}},
 year = "1989",
 school = "Oxford University",
 isbn = "0902928554",
 abstract =
 "The GMachine is an efficient implementation of lazy functional
 languages developed by Augustsson and Johnsson. This thesis may be
 read as a formal mathematical proof that the Gmachine is correct with
 respect to a denotational semantic specification of a simple
 language. It also has more general implications. A simple lazy
 functional language is defined both denotationally and operationally;
 both are defined to handle erroneous results. The operational
 semantics models combinator graph reduction, and is based on reduction
 to weak head normal form. The two semantic definitions are shown to be
 congruent. Because of error handling the language is not
 confluent. Complete strictness is shown to be a necessary and
 sufficient condition for changing lazy function calls to strict
 ones. As strictness analyses are usually used with confluent
 languages, methods are discussed to restore this property. The
 operational semantic model uses indirection nodes to implement
 sharing. An alternative, which is without indirection nodes, is shown
 to be operationally equivalent for terminating programs. The Gmachine
 is shown to be a representation of the combinator graph reduction
 operational model. It may be represented by the composition of a small
 set of combinators which correspond to an abstract machine instruction
 set. Using a modified form of graph isomorphism, alternative sequences
 of instructions are shown to be isomorphic, and hence may be used
 interchangeably.",
 paper = "Lest89.pdf"
}

\end{chunk}

\index{Jones, Simon Peyton}
\begin{chunk}{axiom.bib}
@book{Jone00,
 author = "Jones, Simon Peyton",
 title = {{Implementing Functional Languages: A Tutorial}},
 publisher = "University of Glasgow",
 year = "2000",
 link = "\url{https://www.microsoft.com/enus/research/wpcontent/uploads/1992/01/student.pdf}",
 paper = "Jone00.pdf"
}

\end{chunk}

\index{Hudak, Paul}
\begin{chunk}{axiom.bib}
@misc{Huda89,
 author = "Hudak, Paul",
 title = {{The Conception, Evolution, and Application of Functional
 Programming Languages}},
 link = "\url{http://haskell.cs.yale.edu/wpcontent/uploads/2011/01/cs.pdf}",
 year = "1989",
+@article{Balz18,
+ author = "Balzer, Stephanie and Pfenning, Frank and Toninho, Bernardo",
+ title = {{A Universal Session Type for Untyped Asynchronous Communication}},
+ journal = "CONCUR",
+ volume = "30",
+ number = "1",
+ year = "2018",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/univtype18.pdf}",
abstract =
 "The foundations of functional programming languages are examined from
 both historical and technical perspectives. Their evolution is traced
 through several critical periods: early work on lambda calculus and
 combinatory calculus, Lisp, Iswim, FP, ML, and modern functional
 languages such as Miranda 1 and Haskell. The fundamental premises on
 which the functional programming methodology stands are critically
 analyzed with respect to philosophical, theoretical, and pragmatic
 concerns. Particular attention is paid to the main features that
 characterize modern functional languages: higherorder functions, lazy
 evaluation, equations and patternmatching, strong static typing and
 type inference, and data abstraction. In addition, current research
 areas—such as parallelism, nondeterminism, input/output, and
 stateoriented computations—are examined with the goal of predicting
 the future development and application of functional languages.",
 paper = "Huda89.pdf",
+ "In the simplytyped $\lambda$calculus we can recover the full
+ range of expressiveness of the untyped $\lambda$calculus solely
+ by adding a single recursive type
+ $\mathscr{U} = \mathscr{U}\rightarrow \mathscr{U}$. In contrast,
+ in the sessiontyped $\Pi$calculus, recursion alone is
+ insufficient to recover the untyped $\Pi$calculus, primarily due
+ to linearity: each channel just has two unique endpoints. In this
+ paper, we show that shared channels with a corresponding sharing
+ semantics (based on the language SILL$_s$ developed in prior
+ work) are enough to embed the untyped asynchronous $\Pi$calculus
+ via a universal shared session type $\mathscr{U}_s$. We show that
+ our encoding of the asynchronous $\Pi$calculus satisfies
+ operational correspondence and preserves observable actions (i.e.,
+ processes are weakly bisimilar to their encoding). Moreover, we
+ clarify the expressiveness of SILL$_s$ by developing an
+ operationally correct encoding of SILL$_s$ in the asynchronous
+ $\Pi$calculus.",
+ paper = "Balz18.pdf",
keywords = "printed"
}
\end{chunk}
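+
+The single recursive type mentioned in the abstract can be rendered
+directly in Haskell. The following is a minimal sketch of that idea
+(ours, not the paper's, which works in the session-typed setting):
+
+\begin{verbatim}
+-- U is the one recursive type U ~ (U -> U) from the abstract.
+newtype U = U (U -> U)
+
+-- Application and self-application, as in the untyped calculus.
+app :: U -> U -> U
+app (U f) x = f x
+
+selfApply :: U
+selfApply = U (\x -> app x x)
+-- app selfApply selfApply diverges, like (\x. x x)(\x. x x).
+\end{verbatim}
+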
\index{Grayson, Daniel R.}
+\index{Balzer, Stephanie}
+\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@misc{Gray18,
 author = "Grayson, Daniel R.",
 title = {{An Introduction to Univalent Foundations for Mathematicians}},
 link = "\url{http://arxiv.org/pdfs/1711.01477v3}",
 year = "2018",
 abstract =
 "We offer an introduction for mathematicians to the univalent
 foundations of Vladimir Voevodsky, aiming to explain how he chose to
 encode mathematics in type theory and how the encoding reveals a
 potentially viable foundation for all of modern mathematics that can
 serve as an alternative to set theory",
 paper = "Gray18.pdf",
+@inproceedings{Balz17,
+ author = "Balzer, Stephanie and Pfenning, Frank",
+ title = {{Manifest Sharing with Session Types}},
+ booktitle = "Proc. ACM Program. Lang.",
+ publisher = "ACM",
+ year = "2017"
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/icfp17.pdf}",
+ abstract =
+ "Sessiontyped languages building on the CurryHoward isomorphism
+ between linear logic and sessiontyped communication guarantee session
+ fidelity and deadlock freedom. Unfortunately, these strong guarantees
+ exclude many naturally occurring programming patterns pertaining to
+ shared resources. In this paper, we introduce sharing into a
+ sessiontyped language where types are stratified into linear and
+ shared layers with modal operators connecting the layers. The
+ resulting language retains session fidelity but not the absence of
+ deadlocks, which can arise from contention for shared processes. We
+ illustrate our language on various examples, such as the dining
+ philosophers problem, and provide a translation of the untyped
+ asynchronous $\Pi$calculus into our language.",
+ paper = "Balz17.pdf",
keywords = "printed"
}
\end{chunk}
\index{BreazuTannen, Val}
\index{Coquand, Thierry}
\index{Gunter, Carl A.}
\index{Scedrov, Andre}
\begin{chunk}{axiom.bib}
@article{Tann93,
 author = "BreazuTannen, Val and Coquand, Thierry and Gunter, Carl A.
 and Scedrov, Andre",
 title = {{Inheritance as Implicit Coercion}},
 journal = "Information and Computation",
 volume = "93",
 pages = "172221",
 year = "1991",
+@article{SCSCP10,
+ author = "Unknown",
+ title = {{Symbolic Computation Software Composability Project and
+ its implementations}},
+ journal = "ACM Comm. in Computer Algebra",
+ volume = "44",
+ number = "4",
+ year = "2010",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Bohrer, Brandon}
+\index{Crary, Karl}
+\begin{chunk}{axiom.bib}
+@article{Bohr17,
+ author = "Bohrer, Brandon and Crary, Karl",
+ title = {{TWAM: A Certifying Abstract Machine for Logic Programs}},
+ journal = "ACM Trans. on Computational Logic",
+ volume = "1",
+ number = "1",
+ year = "2017",
+ link = "\url{https://arxiv.org/pdf/1801.00471.pdf}",
abstract =
 "We present a method for providing semantic interpretations for
 languages with a type system featuring inheritance polymorphism. Our
 approach is illustrated on an extension of the language Fun of
 Cardelli and Wegner, which we interpret via a translation into an
 extended polymorphic lambda calculus. Our goal is to interpret
 inheritances in Fun via coercion functions which are definable in the
 target of the translation. Existing techniques in the theory of
 semantic domains can be then used to interpret the extended
 polymorphic lambda calculus, thus providing many models for the
 original language. This technique makes it possible to model a rich
 type discipline which includes parametric polymorphism and recursive
 types as well as inheritance. A central difficulty in providing
 interpretations for explit type disciplines featuring inheritance in
 the sense discussed in this paper arises from the fact that programs
 can typecheck in more than one way. Since interpretations follow the
 typechecking derivations, coherence theorems are required: that is,
 one must prove that the meaning of a program does not depend on the
 way it was typechecked. Proofs of such theorems for our proposed
 interpretation are the basic technical results of this paper.
 Interestingly, proving coherence in the presence of recursive
 types, variants, and abstract types forced us to reexamine fundamental
 equational properties that arise in proof theory (in the form of
 commutative reductions) and domain theory (in the form of strict vs
 nonstrict functions).",
 paper = "Tann93.pdf",
 keywords = "printed"
}

\end{chunk}

\index{Kauers, Manuel}
\begin{chunk}{axiom.bib}
@inproceedings{Kaue08b,
 author = "Kauers, Manuel",
 title = {{Computer Algebra for Special Function Inequalities}},
 booktitle = "Tapas in Experimental Mathematics",
 pages = "215235",
+ "Typepreserving (or typed) compilation uses typing derivations to
+ certify correctness properties of compilation. We have designed and
+ implemented a typepreserving compiler for a simplytyped dialect of
+ Prolog we call TProlog. The crux of our approach is a new certifying
+ abstract machine which we call the Typed Warren Abstract Machine
+ (TWAM). The TWAM has a dependent type system strong enough to specify
+ the semantics of a logic program in the logical framework LF. We
+ present a soundness metatheorem which constitutes a partial
+ correctness guarantee: welltyped programs implement the logic
+ program specified by their type. This metatheorem justifies our design
+ and implementation of a certifying compiler from TProlog to TWAM.",
+ paper = "Bohr17.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Dershowitz, Nachum}
+\index{Gurevich, Yuri}
+\begin{chunk}{axiom.bib}
+@article{Ders08,
+ author = "Dershowitz, Nachum and Gurevich, Yuri",
+ title = {{A Natural Axiomatization of Computability and Proof of
+ Church's Thesis}},
+ journal = "Bulletin of Symbolic Logic",
+ volume = "14",
+ number = "3",
year = "2008",
abstract =
 "Recent coputer proofs for some special function inequalities are
 presented. The algorithmic ideas underlying these computer proofs
 are described, and the conceptual difference to existing
 algorithms for proving special function identities is discussed.",
 paper = "Kaue08b.pdf",
+ "Church’s Thesis asserts that the only numeric functions that can be
+ calculated by effective means are the recursive ones, which are the
+ same, extensionally, as the Turing computable numeric functions. The
+ Abstract State Machine Theorem states that every classical algorithm
+ is behaviorally equivalent to an abstract state machine. This theorem
+ presupposes three natural postulates about algorithmic
+ computation. Here, we show that augmenting those postulates with an
+ additional requirement regarding basic operations gives a natural
+ axiomatization of computability and a proof of Church’s Thesis, as
+ Gödel and others suggested may be possible. In a similar way, but with
+ a different set of basic operations, one can prove Turing’s Thesis,
+ characterizing the effective string functions, and—in particular—the
+ effectivelycomputable functions on string representations of numbers.",
+ paper = "Ders08.pdf",
keywords = "printed"
}
\end{chunk}
\index{Callaghan, Paul}
+\index{Boas, Peter van Emde}
\begin{chunk}{axiom.bib}
@article{Call08,
 author = "Callaghan, Paul",
 title = {{Coercive Subtyping via Mappings of Reduction Behaviour}},
 journal = "Electronic Notes in Theoretical Computer Science",
 volume = "196",
 pages = "5368",
 year = "2008",
+@article{Boas12,
+ author = "Boas, Peter van Emde",
+ title = {{Turing Machines for Dummies}},
+ journal = "LNCS",
+ volume = "7147",
+ pages = "1430",
+ year = "2012",
abstract =
 "This paper reports preliminary work on a novel approach to
 Coercive Subtyping that is based on relationships between
 reduction behaviour of source and target types in coerced
 terms. Coercive subtyping is a subset of recordbased subtyping,
 allowing socalled coercion functions to carry the subtyping. This
 allows many novel and powerful forms of subtyping and
 abbreviation, with applicaions including interfaces to theorem
 provers and programming with dependent type systems. However, the
 use of coercion functions introduces nontrivial overheads, and
 requires difficult proof of properties such as coherence in order
 to guarantee sensible results. These points restrict the
 practicality of coercive subtyping. We begin with the idea that
 coercing a value $v$ from type $U$ to a type %T% intuitively means
 that we wish to compute with $v$ as if it was a value in $T$, not
 that $v$ must be converted into a value in $T$. Instead, we
 explore how to compute on $U$ in terms of computation on $T$, and
 develop a framework for mapping computations on some $T$ on
 computations on some $U$ via a simple extension of the elimination
 rule of $T$. By exposing how computations on different types are
 relatd, we gain insight on and make progress with several aspects
 of coercive subtyping, including (a) distinguishing classes of
 coercion and finding reasons to deprecate use of some classes; (b)
 alternative techniques for improving key properties of coercions;
 (c) greater efficiency from implementations of coercions.",
 paper = "Call08.pdf",
+ "Various methods exists in the litearture for denoting the con
+ figuration of a Turing Machine. A key difference is whether the head
+ position is indicated by some integer (mathematical representation) or
+ is specified by writing the machine state next to the scanned tape
+ symbol (intrinsic representation).
+
+ From a mathematical perspective this will make no difference. How
+ ever, since Turing Machines are primarily used for proving undecidability
+ and/or hardness results these representations do matter. Based on
+ a number of applications we show that the intrinsic representation
+ should be preferred."
+ paper = "Boas12.pdf",
keywords = "printed"
}
\end{chunk}
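+
+To make the distinction concrete, here is a small Haskell sketch of
+the two configuration encodings (illustrative names, not from the
+paper):
+
+\begin{verbatim}
+type State  = Int
+type Symbol = Char
+
+-- Mathematical representation: head position as an integer index.
+data ConfMath = ConfMath State Int [Symbol]
+
+-- Intrinsic representation: the state marker is written into the
+-- tape itself, immediately left of the scanned symbol.
+data ConfIntr = ConfIntr [Symbol] State [Symbol]
+
+toIntrinsic :: ConfMath -> ConfIntr
+toIntrinsic (ConfMath q i w) = ConfIntr (take i w) q (drop i w)
+\end{verbatim}
+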
\index{Harper, Robert}
+\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@misc{Harp18,
 author = "Harper, Robert",
 title = {{Computational Type Theory}},
+@article{Gure12,
+ author = "Gurevich, Yuri",
+ title = {{What Is An Algorithm?}},
+ journal = "LNCS",
+ volume = "7147",
+ pages = "3142",
+ year = "2012",
+ abstract =
+ "We attempt to put the title problem and the ChurchTuring thesis into
+ a proper perspective and to clarify some common misconceptions
+ related to Turing’s analysis of computation. We examine two approaches
+ to the title problem, one wellknown among philosophers and another
+ among logicians.",
+ paper = "Gure12.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Moschovakis, Y.N.}
+\begin{chunk}{axiom.bib}
+@article{Mosc84,
+ author = "Moschovakis, Y.N.",
+ title = {{Abstract Recursion as a Foundation of the Theory of Algorithms}},
+ journal = "LNCS",
+ volume = "1104",
+ pages = "289364",
+ publisher = "Springer",
+ year = "1984",
+ abstract =
+ "The main object of this paper is to describe an abstract,
+ (axiomatic) theory of recursion and its connection with some of
+ the basic, foundational questions of computer science.",
+ paper = "Mosc84.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Pittman, Dan}
+\begin{chunk}{axiom.bib}
+@misc{Pitt18,
+ author = "Pittman, Dan",
+ title = {{Proof Theory Impressionism: Blurring the CurryHoward Line}},
+ link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}",
year = "2018",
 link =
 "\url{http://www.cs.uoregon.edu/research/summerschool/summer18/topics.php}",
 comment = "OPLSS 2018"
+ comment = "Strange Loop 2018 Conference"
}
\end{chunk}
\index{Aspinall, David}
\index{Compagnoni, Adriana}
+\index{Polikarpova, Nadia}
\begin{chunk}{axiom.bib}
@article{Aspi01,
 author = "Aspinall, David and Compagnoni, Adriana",
 title = {{Subtyping Dependent Types}},
 journal = "Theoretical Computer Science",
 volume = "266",
 number = "12",
 pages = "273309",
 year = "2001",
+@misc{Poli18,
+ author = "Polikarpova, Nadia",
+ title = {{TypeDriven Program Synthesis}},
+ link = "\url{https://www.youtube.com/watch?v=HnOix9TFy1A}",
+ year = "2018",
+ abstract =
+ "A promising approach to improving software quality is to enhance
+ programming languages with declarative constructs, such as logical
+ specifications or examples of desired behavior, and to use program
+ synthesis technology to translate these constructs into efficiently
+ executable code. In order to produce code that provably satisfies a
+ rich specification, the synthesizer needs an advanced program
+ verification mechanism that is sufficiently expressive and fully
+ automatic. In this talk, I will present a program synthesis technique
+ based on refinement type checking: a verification mechanism that
+ supports automatic reasoning about expressive properties through a
+ combination of types and SMT solving.
+
+ The talk will present two applications of typedriven synthesis. The
+ first one is a tool called Synquid, which creates recursive functional
+ programs from scratch given a refinement type as input. Synquid is the
+ first synthesizer powerful enough to automatically discover programs
+ that manipulate complex data structures, such as balanced trees and
+ propositional formulas. The second application is a language called
+ Lifty, which uses typedriven synthesis to repair information flow
+ leaks. In Lifty, the programmer specifies expressive information flow
+ policies by annotating the sources of sensitive data with refinement
+ types, and the compiler automatically inserts access checks necessary
+ to enforce these policies across the code."
+}
+
+\end{chunk}
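+
+For a flavor of the refinement types the talk builds on, here is a
+LiquidHaskell-style annotation (a hedged illustration of the general
+idea; Synquid's concrete syntax differs):
+
+\begin{verbatim}
+-- The refinement says the result is at least as large as both
+-- inputs; an SMT solver discharges the obligation automatically.
+{-@ max' :: x:Int -> y:Int -> {v:Int | v >= x && v >= y} @-}
+max' :: Int -> Int -> Int
+max' x y = if x >= y then x else y
+\end{verbatim}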
+
+\index{van der Walt, Paul}
+\begin{chunk}{axiom.bib}
+@mastersthesis{Walt12,
+ author = "van der Walt, Paul",
+ title = {{Reflection in Agda}},
+ school = "Utrecht University",
+ year = "2012",
abstract =
 "The need for subtyping in typesystems with dependent
 types has been realized for some years. But it is hard to
 prove that systems combining the two features have
 fundamental properties such as subject reduction. Here we in
 vestigate a subtyping extension of the system $\lambda$P
 which is an abstract version of the type system of the
 Edinburgh Logical Framework LF. By using an equivalent
 formulation, we establish some important properties of
 the new system $\lambda{\rm P}_{\le}$ including subject
 reduction. Our analysis culminates in a complete and
 terminating algorithm which establishes the decidability
 of typechecking",
 paper = "Aspi01.pdf",
+ "This project explores the recent addition to Agda enabling
+ reflection, in the style of Lisp, MetaML, and Template Haskell. It
+ illustrates several possible applications of reflection that arise in
+ dependently typed programming, and details the limitations of the
+ current implementation of reflection. Examples of typesafe
+ metaprograms are given that illustrate the power of reflection coupled
+ with a dependently typed language. Among other things the limitations
+ inherent in having quote and unquote implemented as keywords are
+ highlighted. The fact that lambda terms are returned without typing
+ information is discussed, and a solution is presented. Also provided
+ is a detailed users’ guide to the reflection API and a library of
+ working code examples to illustrate how various common tasks can be
+ performed, along with suggestions for an updated reflection API in a
+ future version of Agda.",
+ paper = "Walk12.pdf",
keywords = "printed"
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Christiansen, David Thrane}
\begin{chunk}{axiom.bib}
@book{Gord79,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242"
+@misc{Chri18,
+ author = "Christiansen, David Thrane",
+ title = {{A Little Taste of Dependent Types}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=VxINoKFmS4}",
+ abstract =
+ "Dependent types let us use the same programming language for
+ compiletime and runtime code, and are inching their way towards the
+ mainstream from research languages like Coq, Agda and Idris. Dependent
+ types are useful for programming, but they also unite programming and
+ mathematical proofs, allowing us to use the tools and techniques we
+ know from programming to do math.
+
+ The essential beauty of dependent types can sometimes be hard to find
+ under layers of powerful automatic tools. The Little Typer is an
+ upcoming book on dependent types in the tradition of the The Little
+ Schemer that features a tiny dependently typed language called Pie. We
+ will demonstrate a proof in Pie that is also a program."
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@incollection{Gord79a,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "Front Matter",
 booktitle = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242",
 pages = "18",
 paper = "Gord79a.pdf"
+@article{Gure18,
+ author = "Gurevich, Yuri",
+ title = {{ChurchTuring Thesis Cannot Possibly Be True}},
+ year = "2018",
+ link =
+ "\url{https://www.microsoft.com/enus/research/video/churchturingthesiscannotpossiblybetrue/}",
+ abstract =
+ "The thesis asserts this: If an algorithm A computes a partial
+ function f from natural numbers to natural numbers then f is partially
+ recursive, i.e., the graph of f is recursively enumerable.
+
+ The thesis was formulated in the 1930s. The only algorithms at the
+ time were sequential algorithms. Sequential algorithms were
+ axiomatized in 2000. This axiomatization was used in 2008 to prove the
+ thesis for sequential algorithms, i.e., for the case where A ranges
+ over sequential algorithms.
+
+ These days, in addition to sequential algorithms, there are parallel
+ algorithms, distributed algorithms, probabilistic algorithms, quantum
+ algorithms, learning algorithms, etc.
+
+ The question whether the thesis is true in full generality has been
+ actively discussed since the 1960s. We argue that, in full generality, the thesis
+ cannot possibly be true."
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Ellis, Ferris}
\begin{chunk}{axiom.bib}
@incollection{Gord79b,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "Introduction",
 booktitle = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242",
 pages = "112",
 paper = "Gord79b.pdf"
+@misc{Elli18,
+ author = "Ellis, Ferris",
+ title = {{Strength in Numbers: Unums and the Quest for Reliable
+ Arithmetics}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=nVNYjmj_qbY}",
+ abstract =
+ "In the land of computer arithmetic, a tyrant has ruled since its very
+ beginning: the floating point number. Under its rule we have all
+ endured countless hardships and cruelties. To this very day the
+ floating point number still denies 0.1 + 0.2 == 0.3 and returns
+ insidious infinities to software developers everywhere. But a new hero
+ has entered fray: the universal number (unum). Can it topple the float
+ number system and its century long reign?
+
+ This talk will introduce unums, explain their benefits over floating
+ point numbers, and examine multiple real world examples comparing the
+ two. For those not familiar with floating point numbers and their
+ pitfalls, this talk also includes a primer on the topic. Code examples
+ are in Rust, though strong knowledge of the language is not needed."
}
\end{chunk}
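+
+The 0.1 + 0.2 complaint in the abstract is easy to reproduce; a
+minimal Haskell demonstration (any IEEE-754 double behaves the same):
+
+\begin{verbatim}
+main :: IO ()
+main = do
+  print (0.1 + 0.2 == (0.3 :: Double))  -- False
+  print (0.1 + 0.2 :: Double)           -- 0.30000000000000004
+  print (1 / 0 :: Double)               -- Infinity
+\end{verbatim}
+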
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Weirich, Stephanie}
\begin{chunk}{axiom.bib}
@incollection{Gord79c,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "ML",
 booktitle = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242",
 pages = "1361",
 paper = "Gord79c.pdf"
+@misc{Weir18,
+ author = "Weirich, Stephanie",
+ title = {{Dependent Types in Haskell}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=wNa3MMbhwS4}",
+ abstract =
+ "What has dependent type theory done for Haskell? Over the past ten
+ years, the Glasgow Haskell compiler (GHC) has adopted many type system
+ features inspired by dependent type theory. In this talk, I will
+ discuss the influence of dependent types on the design of GHC and on
+ the practice of Haskell programmers. In particular, I will walk
+ through an extended example and use it to analyze what it means to
+ program with dependent types in Haskell. Throughout, I will
+ discuss what we have learned from this experiment in language design:
+ what works now, what doesn't work yet, and what surprised us along
+ the way."
}
\end{chunk}
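+
+A standard example of the GHC features the talk surveys: length-indexed
+vectors via DataKinds and GADTs (a sketch of the general technique, not
+the talk's own code):
+
+\begin{verbatim}
+{-# LANGUAGE DataKinds, GADTs, KindSignatures #-}
+data Nat = Z | S Nat
+
+data Vec (n :: Nat) a where
+  VNil  :: Vec 'Z a
+  VCons :: a -> Vec n a -> Vec ('S n) a
+
+-- vhead is total: the type rules out the empty vector.
+vhead :: Vec ('S n) a -> a
+vhead (VCons x _) = x
+\end{verbatim}
+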
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Christiansen, David Thrane}
\begin{chunk}{axiom.bib}
@incollection{Gord79d,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "PPLAMBDA",
 booktitle = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242",
 pages = "6286",
 paper = "Gord79d.pdf"
+@misc{Chri18a,
+ author = "Christiansen, David Thrane",
+ title = {{Coding for Types: The Universe Pattern in Idris}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=AWeT_G04a0A}"
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur J.}
\index{Wadsworth, Christopher P.}
+\index{Altenkirch, Thorsten}
\begin{chunk}{axiom.bib}
@incollection{Gord79e,
 author = "Gordon, Michael J. and Milner, Arthur J. and
 Wadsworth, Christopher P.",
 title = "APPENDIX",
 booktitle = "Edinburgh LCF",
 publisher = "SpringerVerlag",
 year = "1979",
 isbn = "9783540097242",
 pages = "87159",
 paper = "Gord79e.pdf"
+@misc{Alte18,
+ author = "Altenkirch, Thorsten",
+ title = {{Naive Type Theory}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=bNG53SA4n48p}"
}
\end{chunk}
\index{Reynolds, John C.}
+\index{Hemann, Jason}
+\index{Friedman, Daniel}
\begin{chunk}{axiom.bib}
@misc{Reyn94,
 author = "Reynolds, John C.",
 title = {{An Introduction to the Polymorphic Lambda Calculus}},
 year = "1994",
 paper = "Reyn94.pdf",
+@misc{Hema14,
+ author = "Hemann, Jason and Friedman, Daniel",
+ title = {{Write the Other Half of Your Program}},
+ year = "2014",
+ link = "\url{https://www.youtube.com/watch?v=RG9fBbQrVOM}"
+}
+
+\end{chunk}
+
+\index{McKenna, Brian}
+\begin{chunk}{axiom.bib}
+@misc{Mcke14,
+ author = "McKenna, Brian",
+ title = {{Idris: Practical Dependent Types with Practical Examples}},
+ year = "2014",
+ link = "\url{https://www.youtube.com/watch?v=4i7KrG1Afbk}"
+ abstract =
+ "Dependent types turn types into a firstclass language construct and
+ allows types to be predicated upon values. Allowing types to be
+ controlled by ordinary values allows us to prove many more properties
+ about our programs and enables some interesting forms of
+ metaprogramming. We can do interesting things like write a typesafe
+ printf or verify algebraic laws of data structures  but while
+ dependent types are quickly gaining in popularity, it can be tricky to
+ find examples which are both useful and introductory.
+
+ This talk will demonstrate some practical dependent typing examples
+ (and not sized vectors) using Idris, a language designed for writing
+ executable programs, rather than just proving theorems."
+}
+
+\end{chunk}
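+
+The type-safe printf mentioned above can be approximated even without
+full dependent types; a minimal Haskell GADT sketch of the idea (not
+the talk's Idris code, where the format can be an ordinary string):
+
+\begin{verbatim}
+{-# LANGUAGE GADTs #-}
+-- The index of a format descriptor records the argument types.
+data Format r where
+  End :: Format String
+  Lit :: String -> Format r -> Format r
+  Int :: Format r -> Format (Int -> r)
+
+printf :: Format r -> r
+printf f0 = go f0 id
+  where
+    go :: Format r -> (String -> String) -> r
+    go End       k = k ""
+    go (Lit s f) k = go f (k . (s ++))
+    go (Int f)   k = \i -> go f (k . (show i ++))
+-- printf (Lit "x = " (Int End)) 42 == "x = 42"
+\end{verbatim}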
+
+\index{Brady, Edwin}
+\begin{chunk}{axiom.bib}
+@misc{Brad17,
+ author = "Brady, Edwin",
+ title = {{Dependent Types in the Idris Programming Language 1}},
+ year = "2017",
+ link = "\url{https://www.cs.uoregon.edu/research/summerschool/summer17/topics.php}"
+}
+
+\end{chunk}
+
+\index{Brady, Edwin C.}
+\begin{chunk}{axiom.bib}
+@phdthesis{Brad05,
+ author = "Brady, Edwin C.",
+ title = {{Practical Implementation of a Dependently Typed Functional
+ Programming Language}},
+ school = "University of Durham",
+ year = "2005",
+ link = "\url{https://eb.host.cs.standrews.ac.uk/writings/thesis.pdf}",
+ abstract =
+ "Types express a program’s meaning, and checking types ensures that a
+ program has the intended meaning. In a dependently typed programming
+ language types are predicated on values, leading to the possibility of
+ expressing invariants of a program’s behaviour in its type. Dependent
+ types allow us to give more detailed meanings to programs, and hence
+ be more confident of their correctness.
+
+ This thesis considers the practical implementation of a dependently
+ typed programming language, using the Epigram notation defined by
+ McBride and McKinna. Epigram is a high level notation for dependently
+ typed functional programming elaborating to a core type theory based
+ on Luo’s UTT, using Dybjer’s inductive families and elimination rules
+ to implement pattern matching. This gives us a rich framework for
+ reasoning about programs. However, a naïve implementation introduces
+ several runtime overheads since the type system blurs the
+ distinction between types and values; these overheads include the
+ duplication of values, and the storage of redundant information and
+ explicit proofs.
+
+ A practical implementation of any programming language should be as
+ efficient as possible; in this thesis we see how the apparent
+ efficiency problems of dependently typed programming can be overcome
+ and that in many cases the richer type information allows us to apply
+ optimisations which are not directly available in traditional
+ languages. I introduce three storage optimisations on inductive
+ families; forcing, detagging and collapsing. I further introduce a
+ compilation scheme from the core type theory to Gmachine code,
+ including a pattern matching compiler for elimination rules and a
+ compilation scheme for efficient run time implementation of Peano’s
+ natural numbers. We also see some low level optimisations for removal
+ of identity functions, unused arguments and impossible case branches.
+ As a result, we see that a dependent type theory is an effective base
+ on which to build a feasible programming language.",
+ paper = "Brad05.pdf"
+}
+
+\end{chunk}
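+
+The Peano-numeral optimisation mentioned in the abstract rests on a
+simple isomorphism; a Haskell sketch of the naive type and the integer
+view a compiler can exploit (our illustration, not the thesis's
+Epigram code):
+
+\begin{verbatim}
+data Peano = Zero | Succ Peano
+
+plus :: Peano -> Peano -> Peano
+plus Zero     m = m
+plus (Succ n) m = Succ (plus n m)
+
+toInteger' :: Peano -> Integer
+toInteger' Zero     = 0
+toInteger' (Succ n) = 1 + toInteger' n
+-- A compiler may use machine integers for Peano because
+-- toInteger' (plus n m) == toInteger' n + toInteger' m.
+\end{verbatim}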
+
+\index{Soare, Robert I.}
+\begin{chunk}{axiom.bib}
+@inbook{Soar99,
+ author = "Soare, Robert I.",
+ title = {{The History and Concept of Computability}},
+ booktitle = "Handbook of Computability Theory",
+ publisher = "NorthHolland",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/handbook.pdf}",
+ year = "1999",
+ paper = "Soar99.pdf",
keywords = "printed"
}
\end{chunk}
\index{Meili, Mario}
+\index{Post, Emil L.}
\begin{chunk}{axiom.bib}
@misc{Meilxx,
 author = "Meili, Mario",
 title = {{Polymorphic Lambda Calculus}},
 year = "unknown",
 link = "\urlhttp://wiki.ifs.hsr.ch/SemProgAnTr/files/mmeili_polymorphic_lambda_calculus_final.pdf}",
 paper = "Meilxx.pdf",
+@article{Post44,
+ author = "Post, Emil L.",
+ title = {{Recursively Enumerable Sets of Positive Integers and Their
+ Decision Problems}},
+ journal = "Bull. Amer. Math Soc.",
+ volume = "50",
+ pages = "284316",
+ year = "1944",
+ paper = "Post44.pdf",
keywords = "printed"
}
\end{chunk}
\index{Wadler, Philip}
+\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@misc{Wadl03,
 author = "Wadler, Philip",
 title = {{The GirardReynolds Isomorphism}},
 journal = "Information and Computation",
 volume = "186",
 number = "2",
 pages = "260280",
 year = "2003",
 abstract =
 "The secondorder polymorphic lambda calculus, F2, was
 independently discovered by Girard and Reynolds. Girard
 additionally proved a {\sl representation} theorem: every function
 on natural numbers that can be proved total in secondorder
 intutionistic propositional logic, P2, can be represented in
 F2. Reynolds additionally proved an {\sl abstraction} theorem: for
 a suitable notion of logical relation, every term in F2 takes
 related arguments into related results. We observe that the
 essence of Girard's result is a projection from P2 into F2, and
 that the essence of Reynolds's result is an embedding of F2 into
 P2, and that the Reynolds embedding followed by the Girard
 projection is the identity. The Girard projection discards all
 firstorder quantifiers, so it seems unreasonable to expect that
 the Girard projection followed by the Reynolds embedding should
 also be the identity. However, we show that in the presence of
 Reynolds's parametricity property that this is indeed the case,
 for propositions corresponding to inductive definitions of
 naturals, products, sums, and fixpoint types.",
 paper = "Wadl03.pdf",
 keywords = "printed"
}

\end{chunk}

\index{Barendregt, Hendrik Pieter}
\begin{chunk}{axiom.bib}
@article{Bare91,
 author = "Barendregt, Hendrik Pieter",
 title = {{An Introduction to Generalized Type Systems}},
 journal = "Journal of Functional Programming",
 volume = "1",
 number = "2",
 year = "1991",
 pages = "125154",
+@inproceedings{Soar07,
+ author = "Soare, Robert I.",
+ title = {{Computability and Incomputability}},
+ booktitle = "Proc. Third Conf. on Computability in Europe",
+ year = "2007",
+ publisher = "SpringerVerlag",
+ isbn = "139783540730002",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/siena.pdf}",
abstract =
 "Programming languages often come with type systems. Some of these are
 simple, others are sophisticated. As a stylistic representation of
 types in programming languages several versions of typed lambda
 calculus are studied. During the last 20 years many of these systems
 have appeared, so there is some need of classification. Working
 towards a taxonomy, Barendregt (1991) gives a finestructure of the
 theory of constructions (Coquand and Huet 1988) in the form of a
 canonical cube of eight type systems ordered by inclusion. Berardi
 (1988) and Terlouw (1988) have independently generalized the method of
 constructing systems in the λcube. Moreover, Berardi (1988, 1990)
 showed that the generalized type systems are flexible enough to
 describe many logical systems. In that way the wellknown
 propositionsastypes interpretation obtains a nice canonical form.",
 paper = "Bare91.pdf",
+ "The conventional wisdom presented in most computability books and
+ historical papers is that there were several researchers in the early
+ 1930’s working on various precise definitions and demonstrations of a
+ function specified by a finite procedure and that they should all
+ share approximately equal credit. This is incorrect. It was Turing
+ alone who achieved the characterization, in the opinion of Gödel. We
+ also explore Turing’s oracle machine and its analogous properties in
+ analysis.",
+ paper = "Soar07.pdf",
keywords = "printed"
}
\end{chunk}
\index{Arkoudas, Konstantine}
\index{Musser, David}
+\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@book{Arko17,
 author = "Arkoudas, Konstantine and Musser, David",
 title = {{Fundamental Proof Methods in Computer Science}},
 publisher = "MIT Press",
 year = "2017",
 isbn = "9780262035538"
+@article{Soar96,
+ author = "Soare, Robert I.",
+ title = {{Computability and Recursion}},
+ journal = "Bulletin of Symbolic Logic",
+ volume = "2",
+ year = "1996",
+ pages = "284321",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/compute.pdf}",
+ abstract =
+ "We consider the informal concept of 'computability' or 'effective
+ calculability' and two of the formalisms commonly used to define
+ it, '(Turing) computability' and '(general) recursiveness'. We
+ consider their origin, exact technical definition, concepts,
+ history, general English meanings, how they became fixed in their
+ present roles, how they were first and are now used, their impact
+ on nonspecialists, how their use will affect the future content of
+ the subject of computability theory, and its connection to other
+ related areas.
+
+ After a careful historical and conceptual analysis of
+ computability and recursion we make several recommendations in
+ section 7 about preserving the {\sl intensional} differences
+ between the concepts of 'computability' and
+ 'recursion'. Specifically we recommend that: the term 'recursive'
+ should no longer carry the additional meaning of 'computable' or
+ 'decidable', functions defined using Turing machines, register
+ machines, or their variants should be called 'computable' rather
+ than 'recursive', we should distinguish the intensional difference
+ between Church's Thesis and Turing's Thesis, and use the latter
+ particularly in dealing with mechanistic questions; the name of
+ the subject should be '{\sl Computability Theory}' or simply
+ {\sl Computability} rather than 'Recursive Function Theory'",
+ paper = "Soar96.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{van Heijenoort, Jean}
+\begin{chunk}{axiom.bib}
+@book{Heij67,
+ author = "van Heijenoort, Jean",
+ title = {{From Frege to Gödel}},
+ publisher = "Harvard University Press",
+ isbn = "0674324498",
+ year = "1967"
+}
+
+\end{chunk}
+
+\index{Boolos, George S.}
+\index{Burgess, John P.}
+\index{Jeffrey, Richard C.}
+\begin{chunk}{axiom.bib}
+@book{Bool07,
+ author = "Boolos, George S. and Burgess, John P. and Jeffrey, Richard C.",
+ title = {{Computability and Logic}},
+ publisher = "Cambridge University Press",
+ year = "2007",
+ isbn = "9780521701464"
+}
+
+\end{chunk}
+
+\index{Copeland, B. Jack}
+\begin{chunk}{axiom.bib}
+@book{Cope04,
+ author = "Copeland, B. Jack",
+ title = {{The Essential Turing}},
+ publisher = "Oxford University Press",
+ year = "2004",
+ isbn = "9780198250807"
+}
+
+\end{chunk}
+
+\index{Barwise, Jon}
+\index{Moss, Lawrence}
+\begin{chunk}{axiom.bib}
+@book{Barw96,
+ author = "Barwise, Jon and Moss, Lawrence",
+ title = {{Vicious Circles}},
+ publisher = "CSLI Publications",
+ year = "1996",
+ isbn = "1575860082"
+}
+
+\end{chunk}
+
+\index{Harper, Robert}
+\begin{chunk}{axiom.bib}
+@book{Harp16,
+ author = "Harper, Robert",
+ title = {{Practical Foundations for Programming Languages}},
+ publisher = "Cambridge University Press",
+ year = "2016",
+ isbn = "9781107150300"
+}
+
+\end{chunk}
+
+\index{Hilbert, David}
+\index{CohnVossen, S.}
+\begin{chunk}{axiom.bib}
+@book{Hilb52,
+ author = "Hilbert, David and CohnVossen, S.",
+ title = {{Geometry and the Imagination}},
+ publisher = "Chelsea Publishing Company",
+ year = "1952"
+}
+
+\end{chunk}
+
+\index{Heyting, A.}
+\begin{chunk}{axiom.bib}
+@book{Heyt56,
+ author = "Heyting, A.",
+ title = {{Intuitionism: An Introduction}},
+ publisher = "North Holland",
+ year = "1956"
+}
+
+\end{chunk}
+
+\index{Jeffrey, Richard}
+\begin{chunk}{axiom.bib}
+@book{Jeff81,
+ author = "Jeffrey, Richard",
+ title = {{Formal Logic: Its Scope and Limits}},
+ publisher = "McGrawHill",
+ year = "1981",
+ isbn = "0070323216"
+}
+
+\end{chunk}
+
+\index{Cosmo, Roberto Di}
+\begin{chunk}{axiom.bib}
+@book{Cosm95,
+ author = "Cosmo, Roberto Di",
+ title = {{Isomorphisms of Types}},
+ publisher = "Birkhauser",
+ year = "1995",
+ isbn = "081763763X"
+}
+
+\end{chunk}
+
+\index{Winkler, Franz}
+\begin{chunk}{axiom.bib}
+@book{Wink84,
+ author = "Winkler, Franz",
+ title = {{The Church-Rosser Property in Computer Algebra and Special
+ Theorem Proving}},
+ publisher = "Wien",
+ year = "1984",
+ isbn = "3853695841"
+}
+
+\end{chunk}
+
+\index{Mills, Bruce}
+\begin{chunk}{axiom.bib}
+@book{Mill06,
+ author = "Mills, Bruce",
+ title = {{Theoretical Introduction to Programming}},
+ publisher = "Springer",
+ year = "2006",
+ isbn = "1846280214"
+}
+
+\end{chunk}
+
+\index{Platzer, Andre}
+\begin{chunk}{axiom.bib}
+@book{Plat18,
+ author = "Platzer, Andre",
+ title = {{Logical Foundations of CyberPhysical Systems}},
+ publisher = "Springer",
+ year = "2018",
+ isbn = "9783319635873"
+}
+
+\end{chunk}
+
+\index{Yoshida, Masaaki}
+\begin{chunk}{axiom.bib}
+@book{Yosh97,
+ author = "Yoshida, Masaaki",
+ title = {{Hypergeometric Functions, My Love}},
+ publisher = "Springer",
+ year = "1997",
+ isbn = "9783322901682"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger W.}
+\index{Scholten, Carel S.}
+\begin{chunk}{axiom.bib}
+@book{Dijk90,
+ author = "Dijkstra, Edsger W. and Scholten, Carel S.",
+ title = {{Predicate Calculus and Program Semantics}},
+ publisher = "Springer",
+ year = "1990",
+ isbn = "0387969578"
+}
+
+\end{chunk}
+
+\index{Ehrig, Hartmut}
+\index{Mahr, Bernd}
+\begin{chunk}{axiom.bib}
+@book{Ehri85a,
+ author = "Ehrig, Hartmut and Mahr, Bernd",
+ title = {{Fundamentals of Algebraic Specification 2: Module
+ Specifications and Constraints}},
+ publisher = "Springer Verlag",
+ year = "1985",
+ isbn = "0387517995"
}
\end{chunk}
\index{Dahl, O.J.}
\index{Dijkstra, E.W.}
\index{Hoare, C.A.R}
+\index{Boyer, Robert S.}
+\index{Moore, J Strother}
\begin{chunk}{axiom.bib}
@book{Dahl72a,
 author = "Dahl, O.J. and Dijkstra, E.W. and Hoare, C.A.R",
 title = {{Structured Programming}},
+@book{Boye81a,
+ author = "Boyer, Robert S. and Moore, J Strother",
+ title = {{The Correctness Problem in Computer Science}},
publisher = "Academic Press",
 year = "1972",
 isbn = "0122005562"
+ year = "1981",
+ isbn = "0121229203"
}
\end{chunk}
\index{Nipkow, Tobias}
\index{Tabacznyj, Christophe}
\index{Paulson, Lawrence C.}
\index{Chaieb, Amine}
\index{Rasmussen, Thomas M.}
\index{Avigad, Jeremy}
+\index{Dijkstra, Edsger W.}
+\index{Feijen, W.H.J}
\begin{chunk}{axiom.bib}
@misc{Nipk18,
 author = "Nipkow, Tobias and Tabacznyj, Christophe and
 Paulson, Lawrence C. and Chaieb, Amine and Rasmussen, Thomas M.
 and Avigad, Jeremy",
 title = {{GCD in Isabelle}},
 link = "\url{http://isabelle.in.tum.ed/dist/library/HOL/HOL/GCD.html}",
 year = "2018"
+@book{Dijk88,
+ author = "Dijkstra, Edsger W. and Feijen, W.H.J",
+ title = {{A Method of Programming}},
+ publisher = "Addison Wesley",
+ year = "1988",
+ isbn = "0201175363"
}
\end{chunk}
\index{McCarthy, John}
\index{Abrahams, Paul W.}
\index{Edwards, Daniel J.}
\index{Hart, Timothy P.}
\index{Levin, Michael I.}
+\index{Kiczales, Gregor}
+\index{des Rivieres, Jim}
+\index{Bobrow, Daniel G.}
\begin{chunk}{axiom.bib}
@book{Mcca62,
 author = "McCarthy, John and Abrahams, Paul W. and Edwards, Daniel J.
 and Hart, Timothy P. and Levin, Michael I.",
 title = {{LISP 1.5 Programmer's Manual}},
 link = "\url{http://www.softwarepreservation.org/projects/LISP/book/LISP%201.5%20Programmers%20Manual.pdf}",
+@book{Kicz91,
+ author = "Kiczales, Gregor and des Rivieres, Jim and Bobrow, Daniel G.",
+ title = {{The Art of the Metaobject Protocol}},
publisher = "MIT Press",
 isbn = "0262130114",
 year = "1962"
+ year = "1991",
+ isbn = "0262610744"
+}
+
+\end{chunk}
+
+\index{Herken, Rolf}
+\begin{chunk}{axiom.bib}
+@book{Herk88,
+ author = "Herken, Rolf",
+ title = {{The Universal Turing Machine}},
+ publisher = "Oxford Science Publications",
+ year = "1988",
+ isbn = "0198537743"
+}
+
+\end{chunk}
+
+\index{Kripke, Saul A.}
+\begin{chunk}{axiom.bib}
+@book{Krip80,
+ author = "Kripke, Saul A.",
+ title = {{Naming and Necessity}},
+ publisher = "Harvard University Press",
+ year = "1980",
+ isbn = "0674598466"
+}
+
+\end{chunk}
+
+\index{Kleene, S.C.}
+\index{Vesley, R.E.}
+\begin{chunk}{axiom.bib}
+@book{Klee65,
+ author = "Kleene, S.C. and Vesley, R.E.",
+ title = {{The Foundations of Intuitionistic Mathematics}},
+ publisher = "NorthHolland Publishing Company",
+ year = "1965"
+}
+
+\end{chunk}
+
+\index{Dijkstra, Edsger W.}
+\begin{chunk}{axiom.bib}
+@misc{Dijk70,
+ author = "Dijkstra, Edsger W.",
+ title = {{Concern for Correctness as a Guiding Principle for Program
+ Composition}},
+ year = "1970",
+ comment = "EWD288"
+ link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD02xx/EWD288.html}",
+ paper = "Dijk70.html",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Hahnle, Reiner}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Hahn04,
+ author = "Hahnle, Reiner and Wallenburg, Angela",
+ title = {{Using a Software Testing Technique to Improve Theorem Proving}},
+ year = "2004",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ comment = "paper follows thesis in file",
+ abstract =
+ "Most efforts to combine formal methods and software testing go in the
+ direction of exploiting formal methods to solve testing problems, most
+ commonly test case generation. Here we take the reverse viewpoint and
+ show how the technique of partition testing can be used to improve a
+ formal proof technique (induction for correctness of loops). We first
+ compute a partition of the domain of the induction variable, based on
+ the branch predicates in the program code of the loop we wish to
+ prove. Based on this partition we derive mechanically a partitioned
+ induction rule, which is (hopefully) easier to use than the standard
+ induction rule. In particular, with an induction rule that is
+ tailored to the program to be verified, less user interaction can be
+ expected to be required in the proof. We demonstrate with a number of
+ examples the effectiveness of our method.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
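+
+A sketch of the rule shapes involved (our reconstruction from the
+abstract, with an assumed split point $k$ taken from a branch
+predicate $n < k$): standard Peano induction is
+\[ P(0) \wedge (\forall n.\, P(n) \Rightarrow P(n+1))
+   \Rightarrow \forall n.\, P(n) \]
+while the partitioned rule proves the step separately on each cell,
+\[ P(0) \wedge (\forall n < k.\, P(n) \Rightarrow P(n+1))
+   \wedge (\forall n \ge k.\, P(n) \Rightarrow P(n+1))
+   \Rightarrow \forall n.\, P(n), \]
+so each step case can follow the corresponding branch of the loop body.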
+
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@phdthesis{Wall04,
+ author = "Wallenburg, Angela",
+ title = {{Inductive Rules for Proving Correctness of Imperative Programs}},
+ school = "Goteborg University",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ abstract =
+ "This thesis is aimed at simplifying the userinteraction in
+ semiinteractive theorem proving for imperative programs. More
+ specifically, we describe the creation of customised induction rules
+ that are tailormade for the specific program to verify and thus make
+ the resulting proof simpler. The concern is in user interaction,
+ rather than in proof strength. To achieve this, two different
+ verification techniques are used.
+
+ In the first approach, we develop an idea where a software testing
+ technique, partition analysis, is used to compute a partition of the
+ domain of the induction variable, based on the branch predicates in
+ the program we wish to prove correct. Based on this partition we
+ derive mechanically a partitioned induction rule, which then inherits
+ the divideandconquer style of partition analysis, and (hopefully) is
+ easier to use than the standard (Peano) induction rule.
+
+ The second part of the thesis continues with a more thorough
+ development of the method. Here the connection to software testing is
+ completely removed and the focus is on inductive theorem proving only.
+ This time, we make use of failed proof attempts in a theorem prover
+ to gain information about the problem structure and create the
+ partition. Then, based on the partition we create an induction rule,
+ in destructor style, that is customised to make the proving of the
+ loop simpler.
+
+ With the customised induction rules, in comparison to standard (Peano)
+ induction or Noetherian induction, the required user interaction is
+ moved to an earlier point in the proof which also becomes more
+ modularised. Moreover, by using destructor style induction we
+ circumvent the problem of creating inverses of functions. The
+ soundness of the customised induction rules created by the method is
+ shown. Furthermore, the machinery of the theorem prover (KeY) is used
+ to make the method automatic. The induction rules are developed to
+ prove the total correctness of loops in an objectoriented language
+ and we concentrate on integers.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Olsson, Ola}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Olss04,
+ author = "Olsson, Ola and Wallenburg, Angela",
+ title = {{Automatic Generation of Customised Induction Rules for
+ Proving Correctness of Imperative Programs}},
+ year = "2004",
+ comment = "paper follows thesis in file",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/lic.pdf}",
+ abstract =
+ "In this paper we develop a method for automatic construction of
+ customised induction rules for use in a semiinteractive theorem
+ prover. The induction rules are developed to prove the total
+ correctness of loops in an objectoriented language. We concentrate
+ on integers. First we compute a partition of the domain of the
+ induction variable. Our method makes use of failed proof attempts in
+ the theorem prover to gain information about the problem structure and
+ create the partition. Then, based on this partition we create an
+ induction rule, in destructor style, that is customised to make the
+ proving of the loop simpler. Our concern is in user interaction,
+ rather than in proof strength.
+
+ Using the customised induction rules, some separation of proof of
+ control flow and data correctness is achieved, and we find that in
+ comparison to standard (Peano) induction or Noetherian induction,
+ simpler user interaction can be expected. Furthermore, by using
+ destructor style induction we circumvent the problem of creating
+ inverses of functions. We show the soundness of the customised
+ induction rules created by the method. Furthermore, we use the
+ machinery of the theorem prover (KeY) to make the method automatic.
+ Several interesting areas are also identified that could open up for
+ a larger range of loops the method can handle, as well as pointing
+ towards full automation of these cases.",
+ paper = "Wall04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Olsson, Ola}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Olss05,
+ author = "Olsson, Ola and Wallenburg, Angela",
+ title = {{Customised Induction Rules for Proving Correctness of
+ Imperative Programs}},
+ year = "2005",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/olswbg05.pdf}",
+ abstract =
+ "In this paper we develop a method for automatic construction of
+ customised induction rules for use in a semi-interactive theorem
+ prover. The induction rules are developed to prove the total
+ correctness of loops in an object-oriented language. We concentrate
+ on integers. First we compute a partition of the domain of the
+ induction variable. Our method makes use of failed proof attempts in
+ the theorem prover to gain information about the problem structure and
+ create the partition. Then, based on this partition we create an
+ induction rule, in destructor style, that is customised to make the
+ proving of the loop simpler. Our concern is in user interaction,
+ rather than in proof strength.
+
+ Using the customised induction rules, some separation of proof of
+ control flow and data correctness is achieved, and we find that in
+ comparison to standard (Peano) induction or Noetherian induction,
+ simpler user interaction can be expected. Furthermore, by using
+ destructor style induction we circumvent the problem of creating
+ inverses of functions. We show the soundness of the customised
+ induction rules created by the method. Furthermore, we use the
+ machinery of the theorem prover (KeY) to make the method automatic.
+ Several interesting areas are also identified that could open up for
+ a larger range of loops the method can handle, as well as pointing
+ towards full automation of these cases.",
+ paper = "Olss05.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Wall09,
+ author = "Wallenburg, Angela",
+ title = {{Generalisation of Inductive Formulae based on Proving by
+ Symbolic Execution}},
+ year = "2009",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/awWING2009.pdf}",
+ abstract =
+ "Induction is a powerful method that can be used to prove the total
+ correctness of program loops. Unfortunately the induction proving
+ process in an interactive theorem prover is often very cumbersome. In
+ particular it can be difficult to find the right induction formula.
+ We describe a method for generalising induction formulae by analysing
+ a symbolic proof attempt in a semi-interactive first-order theorem
+ prover. Based on the proof attempt we introduce universally
+ quantified variables, metavariables and sets of constraints on these.
+ The constraints describe the conditions for a successful proof. By
+ the help of examples, we outline some classes of problems and their
+ associated constraint solutions, and possible ways to automate the
+ constraint solving.",
+ paper = "Wall09.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Moy, Yannick}
+\index{Wallenburg, Angela}
+\begin{chunk}{axiom.bib}
+@misc{Moyx10,
+ author = "Moy, Yannick and Wallenburg, Angela",
+ title = {{Tokeneer: Beyond Formal Program Verification}},
+ year = "2010",
+ link = "\url{http://www.cse.chalmers.se/~angelaw/papers/ERTS2010.pdf}",
+ abstract =
+ "Tokeneer is a smallsized (10 kloc) security system which was
+ formally developed and verified by Praxis at the request of NSA, using
+ SPARK technology. Since its open-source release in 2008, only two
+ problems were found, one by static analysis, one by code review. In
+ this paper, we report on experiments where we systematically applied
+ various static analysis tools (compiler, bug-finder, proof tools)
+ and focused code reviews to all of the SPARK code (formally verified)
+ and supporting Ada code (not formally verified) of the Tokeneer
+ Project. We found 20 new problems overall, half of which are defects
+ that could lead to a system failure should the system be used in its
+ current state. Only two defects were found in SPARK code, which
+ confirms the benefits of applying formal verification to reach
+ higher levels of assurance. In order to leverage these benefits to
+ code that was not formally verified from the start, we propose to
+ associate static analyses and dynamic analyses around a common
+ expression of properties and constraints. This is the goal of starting
+ project HiLite, which involves AdaCore and Altran Praxis together
+ with several industrial users and research labs.",
+ paper = "Moyx10.pdf",
+ keywords = "printed"
}
\end{chunk}
+
+\index{Baker, Henry G.}
+\begin{chunk}{axiom.bib}
+@article{Bake92,
+ author = "Baker, Henry G.",
+ title = {{A Decision Procedure for Common Lisp's SUBTYPEP Predicate}},
+ journal = "Lisp and Symbolic Computation",
+ volume = "5",
+ number = "3",
+ year = "1992",
+ pages = "157190"
+ link = "\url{}",
+ abstract =
+ "Common Lisp [CL84,CL90] includes a dynamic datatype system of
+ moderate complexity, as well as predicates for checking the types of
+ language objects. Additionally, an interesting predicate of two 'type
+ specifiers'—SUBTYPEP—is included in the language. This subtypep
+ predicate provides a mechanism with which to query the Common Lisp
+ type system regarding containment relations among the various built-in
+ and user-defined types. While subtypep is rarely needed by an
+ applications programmer, the efficiency of a Common Lisp
+ implementation can depend critically upon the quality of its subtypep
+ predicate: the runtime system typically calls upon subtypep to decide
+ what sort of representations to use when making arrays; the compiler
+ calls upon subtypep to interpret user declarations, on which efficient
+ data representation and code generation decisions are based.
+
+ As might be expected due to the complexity of the Common Lisp type
+ system, there may be type containment questions which cannot be
+ decided. In these cases subtypep is expected to return 'can't
+ determine', in order to avoid giving an incorrect
+ answer. Unfortunately, most Common Lisp implementations have abused
+ this license by answering 'can't determine' in all but the most
+ trivial cases. In particular, most Common Lisp implementations of
+ SUBTYPEP fail on the basic axioms of the Common Lisp type system
+ itself [CL84,p.33]. This situation is particularly embarrassing for
+ Lisp—the premier 'symbol processing language'—in which the
+ implementation of complex symbolic logical operations should be
+ relatively easy. Since subtypep was presumably included in Common Lisp
+ to answer the hard cases of type containment, this 'lazy evaluation'
+ limits the usefulness of an important language feature.
+
+ This paper shows how those type containment relations of Common Lisp
+ which can be decided at all, can be decided simply and quickly by a
+ decision procedure which can dramatically reduce the number of
+ occurrences of the 'can't determine' answer from subtypep . This
+ decision procedure does not require the conversion of a type specifier
+ expression to conjunctive or disjunctive normal form, and therefore
+ does not incur the exponential explosion in space and time that such a
+ conversion would entail.
+
+ The lattice mechanism described here for deciding subtypep is also
+ ideal for performing type inference [Baker90]; the particular
+ implementation developed here, however, is specific to the type system
+ of Common Lisp [Beer88].",
+ paper = "Bake92.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
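+A minimal editorial sketch of the lattice idea follows: if the type
+universe is partitioned into disjoint atomic types (the atoms below
+are illustrative, not Baker's actual partition), every type denotes a
+set of atoms encoded as a bit vector, so intersection, union and
+complement are single machine operations and containment is a subset
+test.
+
+\begin{verbatim}
+# Toy bit-vector type lattice in the spirit of Baker's SUBTYPEP.
+ATOMS = ["fixnum", "bignum", "ratio", "float", "symbol", "cons"]
+
+def mask(*names):
+    """Bit vector with one bit per atomic type."""
+    return sum(1 << ATOMS.index(n) for n in names)
+
+INTEGER  = mask("fixnum", "bignum")
+RATIONAL = INTEGER | mask("ratio")
+NUMBER   = RATIONAL | mask("float")
+
+def subtypep(a, b):
+    """True iff every atom of a is also an atom of b."""
+    return (a & ~b) == 0
+
+assert subtypep(INTEGER, NUMBER)
+assert not subtypep(NUMBER, RATIONAL)   # floats are not rationals
+\end{verbatim}
+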
+\index{Haynes, Christopher T.}
+\index{Friedman, Daniel P.}
+\begin{chunk}{axiom.bib}
+@techreport{Hayn87,
+ author = "Haynes, Christopher T. and Friedman, Daniel P."
+ title = {{Embedding Continuations in Procedural Objects}},
+ type = "technical report",
+ institution = "Indiana University",
+ number = "213",
+ year = "1987",
+ abstract =
+ "Continuations, when available as firstclass objects, provide a
+ general control abstraction in programming languages. They
+ liberate the programmer from specific control structures,
+ increasing programming language extensibility. Such continuations
+ may be extended by embedding them in procedural objects. This
+ technique is first used to restore a fluid environment when a
+ continuation object is invoked. We then consider techniques for
+ constraining the power of continuations in the interest of
+ security and efficiency. Domain mechanisms, which create dynamic
+ barriers for enclosing control, are implemented using
+ fluids. Domains are then used to implement an unwind-protect
+ facility in the presence of first-class continuations. Finally, we
+ present two mechanisms, wind-unwind and dynamic-wind, that
+ generalize unwind-protect.",
+ paper = "Hayn87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
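+For orientation, the unwind-protect facility discussed in the report
+corresponds roughly to try/finally in other languages; the sketch
+below is an editorial analogy, not the authors' code. The report's
+real difficulty is making the guarantee survive first-class
+continuations, which can exit and later re-enter the body; try/finally
+has no notion of re-entry, which is why dynamic-wind takes both a
+before and an after thunk.
+
+\begin{verbatim}
+# unwind-protect runs a cleanup however the body exits.
+def unwind_protect(body, cleanup):
+    try:
+        return body()
+    finally:
+        cleanup()
+
+log = []
+try:
+    unwind_protect(lambda: 1 // 0, lambda: log.append("cleaned up"))
+except ZeroDivisionError:
+    pass
+assert log == ["cleaned up"]
+\end{verbatim}
+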
+\index{Goldblatt, Robert}
+\begin{chunk}{axiom.bib}
+@book{Gold84,
+ author = "Goldblatt, Robert",
+ title = {{Topoi: The Categorical Analysis of Logic}},
+ year = "1984",
+ publisher = "Dover",
+ isbn = "9780486450261"
+}
+
+\end{chunk}
+
+\index{Clocksin, William F.}
+\begin{chunk}{axiom.bib}
+@book{Cloc91,
+ author = "Clocksin, William F.",
+ title = {{Clause and Effect}},
+ year = "1991",
+ publiser = "Springer",
+ isbn = "3540629718"
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\index{Steele, Guy L.}
+\begin{chunk}{axiom.bib}
+@article{Broo82a,
+ author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
+ title = {{An Optimizing Compiler for Lexically Scoped LISP}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "17",
+ number = "6",
+ year = "1982",
+ pages = "261275',
+ abstract =
+ "We are developing an optimizing compiler for a dialect of the
+ LISP language. The current target architecture is the S1, a
+ multiprocessing supercomputer designed at Lawrence Livermore
+ National Laboratory. While LISP is usually thought of as a language
+ primarily for symbolic processing and list manipulation, this
+ compiler is also intended to compete with the S1 PASCAL and
+ FORTRAN compilers for quality of compiled numerical code. The S1
+ is designed for extremely high-speed signal processing as well as
+ for symbolic computation; it provides primitive operations on
+ vectors of floating-point and complex numbers. The LISP compiler
+ is designed to exploit the architecture heavily.
+
+ The compiler is structurally and conceptually similar to the
+ BLISS-11 compiler and the compilers produced by PQCC. In
+ particular, the TNBIND technique has been borrowed and extended.
+
+ Particularly interesting properties of the compiler are:
+ \begin{itemize}
+ \item Extensive use of source-to-source transformation
+ \item Use of an intermediate form that is expression-oriented
+ rather than statement-oriented
+ \item Exploitation of tail-recursive function calls to represent
+ complex control structures
+ \item Efficient compilation of code that can manipulate procedural
+ objects that require heap-allocated environments
+ \item Smooth runtime interfacing between the ``numerical world''
+ and ``LISP pointer world'', including automatic stack allocation
+ of objects that ordinarily must be heap-allocated
+ \end{itemize}
+
+ Each of these techniques has been used before, but we believe
+ their synthesis to be original and unique.
+
+ The compiler is tabledriven to a great extent, more so than
+ BLISS-11 but less so than a PQCC compiler. We expect to be able to
+ redirect the compiler to other target architectures such as the
+ VAX or PDP-10 with relatively little effort.",
+ paper = "Broo82a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\index{Steele, Guy L.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo82,
+ author = "Brooks, Rodney A. and Gabriel, Richard P. and Steele, Guy L.",
+ title = {{S1 Common Lisp Implementation}},
+ booktitle = "Proc ACM Symp. on Lisp and functional programming",
+ publisher = "ACM",
+ year = "1982",
+ isbn = "0897910826",
+ abstract =
+ "We are developing a Lisp implementation for the Lawrence
+ Livermore National Laboratory S1 Mark IIA computer. The dialect
+ of Lisp is an extension of COMMON Lisp [Steele;1982], a descendant
+ of Maclisp [Moon;1974] and Lisp Machine Lisp [Weinreb;1981].",
+ paper = "Broo82.pdf",
+ keywords = "printed"
+
+}
+
+\end{chunk}
+
+\index{Beer, Randall D.}
+\begin{chunk}{axiom.bib}
+@article{Beer87,
+ author = "Beer, Randall D.",
+ title = {{Preliminary Report on A Practical Type Inference System
+ for Common Lisp}},
+ journal = "Lisp Pointers",
+ volume = "1",
+ number = "2",
+ year = "1987",
+ pages = "511",
+ paper = "Beer87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Brooks, Rodney A.}
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo84,
+ author = "Brooks, Rodney A. and Gabriel, Richard P.",
+ title = {{A Critique of Common Lisp}},
+ booktitle = "Symposium on Lisp and Functional Programming",
+ pages = "18",
+ year = "1984",
+ abstract =
+ "A major goal of the COMMON LISP committee was to define a Lisp
+ language with sufficient power and generality that people would be
+ happy to stay within its confines and thus write inherently
+ transportable code. We argue that the resulting language
+ definition is too large for many shortterm and mediumterm
+ potential applications. In addition many parts of COMMON LISP
+ cannot be implemented very efficiently on stock hardware. We
+ further argue that the very generality of the design with its
+ different efficiency profiles on different architectures works
+ against the goal of transportability.",
+ paper = "Broo84.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Cousot, Patrick}
+\index{Cousot, Radhia}
+\begin{chunk}{axiom.bib}
+@inproceedings{Cous77,
+ author = "Cousot, Patrick and Cousot, Radhia",
+ title = {{Abstract Interpretation: A Unified Lattice Model for
+ Static Analysis of Programs by Construction or
+ Approximation of Fixpoints}},
+ booktitle = "Symp. on Principles of Programming Languages",
+ pages = "238252",
+ year = "1977",
+ paper = "Cous77.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
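+A tiny editorial instance of the framework: the sign domain abstracts
+sets of integers, and each abstract operation soundly over-approximates
+its concrete counterpart.
+
+\begin{verbatim}
+# Sign analysis: abstract values stand for sets of integers.
+NEG, ZERO, POS, TOP = "-", "0", "+", "T"   # T = unknown sign
+
+def abs_mul(a, b):
+    """Abstract multiplication on the sign lattice."""
+    if a == ZERO or b == ZERO:
+        return ZERO
+    if TOP in (a, b):
+        return TOP
+    return POS if a == b else NEG
+
+assert abs_mul(NEG, NEG) == POS    # (-2) * (-3) = 6 > 0
+assert abs_mul(TOP, ZERO) == ZERO  # anything times zero is zero
+\end{verbatim}
+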
+\index{Rees, Jonathan}
+\begin{chunk}{axiom.bib}
+@misc{Rees82,
+ author = "Rees, Jonathan",
+ title = {{T: A Dialect of Lisp or, LAMBDA: The Ultimate Software Tool}},
+ year = "1982",
+ abstract =
+ "The T proejct is an experiment in language design and
+ implementation. Its purpose is to test the thesis developed by
+ Steele and Sussman in their series of papers about the Scheme
+ language: that Scheme may be used as the basis for a practical
+ programming language of exceptional expressive power, and that
+ implementations of Scheme could perform better than other Lisp
+ systems, and competitively with implementations of programming
+ languages, such as C and Bliss, which are usually considered to be
+ inherently more efficient than Lisp on conventional machine
+ architectures. We are developing a portable implementation of T,
+ currently targeted for the VAX under the Unix and VMS operating
+ systems and for the Apollo, an MC68000-based workstation.",
+ paper = "Rees82.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Shivers, Olin}
+\begin{chunk}{axiom.bib}
+@techreport{Shiv90,
+ author = "Shivers, Olin",
+ title = {{Data-Flow Analysis and Type Recovery in Scheme}},
+ year = "1990",
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMUCS90115",
+ abstract =
+ "The lack of explicit type information in Scheme prevents the
+ application of many compiler optimizations. Implicit type
+ information can oftentimes be recovered by analyzing the flow of
+ control through primitive operations and conditionals. Such flow
+ analysis, however, is difficult in the presence of higher-order
+ functions. This paper presents a technique for recovering type
+ information from Scheme programs using flow analysis. The
+ algorithm used for flow analysis is semantically based, and is
+ novel in that it correctly handles the ``environment flow''
+ problem. Several examples of a working implementation of the
+ analysis are presented. The techniques are applicable not only to
+ Scheme type recovery, but also to related languages, such as
+ Common Lisp and Standard ML, and to related data-flow analyses,
+ such as range analysis, future analysis, and copy propagation.",
+ paper = "Shiv90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Steenkiste, Peter}
+\index{Hennessy, John}
+\begin{chunk}{axiom.bib}
+@article{Stee87,
+ author = "Steenkiste, Peter and Hennessy, John",
+ title = {{Tags and Type Checking in LISP: Hardware and Software Approaches}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "22",
+ number = "10",
+ pages = "5059",
+ year = "1987",
+ abstract =
+ "One of the major factors that distinguishes LISP from many other
+ languages (Pascal, C, Fortran, etc.) is the need for runtime type
+ checking. Runtime type checking is implemented by adding to each
+ data object a tag that encodes type information. Tags must be
+ compared for type compatibility, removed when using the data, and
+ inserted when new data items are created. This tag manipulation,
+ together with other work related to dynamic type checking and
+ generic operations, constitutes a significant component of the
+ execution time of LISP programs. This has led both to the
+ development of LISP machines that support tag checking in hardware
+ and to the avoidance of type checking by users running on stock
+ hardware. To understand the role and necessity of specialpurpose
+ hardware for tag handling, we first measure the cost of type
+ checking operations for a group of LISP programs. We then examine
+ hardware and software implementations of tag operations and
+ estimate the cost of tag handling with the different tag
+ implementation schemes. The data shows that minimal levels of
+ support provide most of the benefits, and that tag operations can
+ be relatively inexpensive, even when no special hardware support
+ is present.",
+ paper = "Stee87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
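+As background, a common software tagging scheme (an editorial sketch,
+not the paper's measurements) keeps the type tag in the low bits of a
+word, so a type check is a mask-and-compare and fixnum arithmetic can
+often proceed without untagging.
+
+\begin{verbatim}
+# Toy model of low-tag encoding with a 2-bit tag field.
+TAG_BITS   = 2
+TAG_FIXNUM = 0                      # tag 0: addition works untagged
+TAG_MASK   = (1 << TAG_BITS) - 1
+
+def box_fixnum(n):   return (n << TAG_BITS) | TAG_FIXNUM
+def is_fixnum(w):    return (w & TAG_MASK) == TAG_FIXNUM
+def unbox_fixnum(w): return w >> TAG_BITS
+
+a, b = box_fixnum(20), box_fixnum(22)
+assert is_fixnum(a) and is_fixnum(b)
+assert unbox_fixnum(a + b) == 42    # tags of zero add transparently
+\end{verbatim}
+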
+\index{Brooks, Rodney A.}
+\index{Posner, David B.}
+\index{McDonald, James L.}
+\index{White, Jon L.}
+\index{Benson, Eric}
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Broo86,
+ author = "Brooks, Rodney A. and Posner, David B. and McDonald, James L.
+ and White, Jon L. and Benson, Eric and Gabriel, Richard P.",
+ title = {{Design of An Optimising, Dynamically Retargetable Compiler
+ for Common Lisp}},
+ booktitle = "Conf. on Lisp and Functional Programming",
+ publisher = "ACM",
+ pages = "6785",
+ year = "1986",
+ abstract =
+ "We outline the components of a retargetable crosscompiler for
+ the Common Lisp language. A description is given of a method for
+ modeling the various hardware features in the compiler's database,
+ and a breakdown is shown of the compiler itself into various
+ machineindependent and machinedependent modules. A novel feature
+ of this development is the dynamic nature of the retargeting:
+ Databases for multiple hardware architectures are a standard part
+ of the compiler, and the internal interfaces used by the compiler
+ are such that the machinedependent modules may be instantly
+ switched from one to another. Examples of generated code in
+ several environments will be given to demonstrate the high quality
+ of the output available, even under this very modular approach.",
+ paper = "Broo86.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Kaplan, Marc A.}
+\index{Ullman, Jeffrey D.}
+\begin{chunk}{axiom.bib}
+@article{Kapl80,
+ author = "Kaplan, Marc A. and Ullman, Jeffrey D.",
+ title = {{A Scheme for the Automatic Inference of Variable Types}},
+ journal = "J. ACM",
+ volume = "27",
+ number = "1",
+ pages = "128.145",
+ year = "1980",
+ abstract =
+ "In this paper an algorithm for the determination of runtime
+ types in a programming language requiring no type declarations is
+ presented. It is demonstrated that this algorithm is superior to
+ other published algorithms in the sense that it produces stronger
+ assertions about the set of possible types for variables than do
+ other known algorithms. In fact this algorithm is shown to be the
+ best possible algorithm from among all those that use the same set
+ of primitive operators.",
+ paper = "Kapl80.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
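+The flavour of such an algorithm (an editorial sketch, not the paper's
+formulation) is a monotone fixpoint computation: each assignment adds
+possible types for a variable, and analysis of a loop iterates until
+the type sets stop growing.
+
+\begin{verbatim}
+# Fixpoint propagation of type sets for variables.
+def analyse(body, env):
+    changed = True
+    while changed:                   # iterate until stable
+        changed = False
+        for var, types in body(env):
+            if not types <= env.get(var, set()):
+                env[var] = env.get(var, set()) | types
+                changed = True
+    return env
+
+# x := 1; while ...: x := x / 2   (division may yield a float)
+def body(env):
+    yield "x", {"int"}
+    if "int" in env.get("x", set()):
+        yield "x", {"float"}
+
+print(analyse(body, {}))             # x may be int or float
+\end{verbatim}
+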
+\index{Ma, KwanLiu}
+\index{Kessler, Robert R.}
+\begin{chunk}{axiom.bib}
+@article{Maxx90,
+ author = "Ma, KwanLiu and Kessler, Robert R.",
+ title = {{TICL: A Type Inference System for Common Lisp}},
+ journal = "Software: Practice and Experience",
+ volume = "20",
+ number = "6",
+ pages = "593623",
+ year = "1990",
+ abstract =
+ "Most current Common Lisp compilers generate more efficient code
+ when supplied with data type information. However, in keeping with
+ standard Lisp programming style, most programmers are reluctant to
+ provide type information; they simply allow the runtime type
+ system to manage the data types accordingly. To fill this gap, we
+ have designed and implemented a type inference system for Common
+ Lisp (TICL). TICL takes a Lisp program that has been annotated
+ with a few type declarations, adds as many declarations as
+ possible, and produces a type declared program. The compiler can
+ then use this information to generate more efficient
+ code. Measurements indicate that a 20 per cent speed improvement
+ can generally be achieved.",
+ paper = "Maxx90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Henglein, Friedrich}
+\begin{chunk}{axiom.bib}
+@phdthesis{Heng89,
+ author = "Henglein, Friedrich",
+ title = {{Polymorphic Type Inference and SemiUnification}},
+ school = "Rutgers",
+ year = "1989",
+ link =
+ "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.388.1275&rep=rep1&type=pdf}",
+ abstract =
+ "In the last ten years declarationfree programming languages with
+ a polymorphic typing discipline (ML, B) have been developed to
+ approximate the flexibility and conciseness of dynamically typed
+ languages (LISP, SETL) while retaining the safety and execution
+ efficiency of conventional statically typed languages (Algol68,
+ Pascal). These polymorphic languages can be type checked at
+ compile time, yet allow functions whose arguments range over a
+ variety of types.
+
+ We investigate several polymorphic type systems, the most powerful
+ of which, termed the Milner-Mycroft Calculus, extends the so-called
+ let-polymorphism found in, e.g., ML with a polymorphic typing rule
+ for recursive definitions. We show that semi-unification, the
+ problem of solving inequalities over first-order terms,
+ characterizes type checking in the Milner-Mycroft Calculus up to
+ polynomial time, even in the restricted case where nested
+ definitions are disallowed. This permits us to extend some
+ infeasibility results for related combinatorial problems to type
+ inference and to correct several claims and statements in the
+ literature.
+
+ We prove the existence of unique most general solutions of term
+ inequalities, called most general semi-unifiers, and present an
+ algorithm for computing them that terminates for all known inputs
+ due to a novel ``extended occurs check''. We conjecture this
+ algorithm to be uniformly terminating even though, at present,
+ general semi-unification is not known to be decidable. We prove
+ termination of our algorithm for a restricted case of
+ semi-unification that is of independent interest.
+
+ Finally, we offer an explanation for the apparent practicality of
+ polymorphic type inference in the face of theoretical
+ intractability results.",
+ paper = "Heng89.pdf",
+}
+
+\end{chunk}
+
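+For reference, the semi-unification problem mentioned above can be
+stated compactly (standard definition, paraphrased): given term
+inequalities $M_1 \le N_1, \dots, M_k \le N_k$, find a substitution
+$S$ such that for each $i$ there is a further substitution $R_i$ with
+\[
+R_i(S(M_i)) = S(N_i),
+\]
+i.e. $S(M_i)$ matches onto $S(N_i)$. Unlike unification, $x \le f(x)$
+is solvable (take $S$ to be the identity and $R = [x \mapsto f(x)]$),
+whereas the equation $x = f(x)$ is not.
+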
+\index{Nimmer, Jeremy W.}
+\index{Ernst, Michael D.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Nimm02,
+ author = "Nimmer, Jeremy W. and Ernst, Michael D.",
+ title = {{Automatic Generation of Program Specifications}},
+ booktitle = "Symp. on Software Testing and Analysis",
+ publisher = "ACM Press",
+ year = "2002",
+ abstract =
+ "Producing specifications by dynamic (runtime) analysis of program
+ executions is potentially unsound, because the analyzed executions may
+ not fully characterize all possible executions of the program. In
+ practice, how accurate are the results of a dynamic analysis? This
+ paper describes the results of an investigation into this question,
+ determining how much specifications generalized from program runs
+ must be changed in order to be verified by a static checker.
+ Surprisingly, small test suites captured nearly all program behavior
+ required by a specific type of static checking; the static checker
+ guaranteed that the implementations satisfy the generated
+ specifications, and ensured the absence of runtime exceptions. Measured
+ against this verification task, the generated specifications scored
+ over 90 percent on precision, a measure of soundness, and on recall, a
+ measure of completeness.
+
+ This is a positive result for testing, because it suggests that
+ dynamic analyses can capture all semantic information of interest for
+ certain applications. The experimental results demonstrate that a
+ specific technique, dynamic invariant detection, is effective at
+ generating consistent, sufficient specifications for use by a static
+ checker. Finally, the research shows that combining static and
+ dynamic analyses over program specifications has benefits for users of
+ each technique, guaranteeing soundness of the dynamic analysis and
+ lessening the annotation burden for users of the static analysis.",
+ paper = "Nimm02.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Hantler, Sidney L.}
+\index{King, James C.}
+\begin{chunk}{axiom.bib}
+@article{Hant76,
+ author = "Hantler, Sidney L. King, James C.",
+ title = {{An Introduction to Proving the Correctness of Programs}},
+ journal = "ACM Computing Surveys",
+ volume = "8",
+ number = "3",
+ pages = "331353",
+ year = "1976",
+ abstract =
+ "This paper explains, in an introductory fashion, the method of
+ specifying the correct behavior of a program by the use of
+ input/output assertions and describes one method for showing that the
+ program is correct with respect to those assertions. An initial
+ assertion characterizes conditions expected to be true upon entry to
+ the program and a final assertion characterizes conditions expected to
+ be true upon exit from the program. When a program contains no
+ branches, a technique known as symbolic execution can be used to show
+ that the truth of the initial assertion upon entry guarantees the
+ truth of the final assertion upon exit. More generally, for a program
+ with branches one can define a symbolic execution tree. If there is
+ an upper bound on the number of times each loop in such a program may
+ be executed, a proof of correctness can be given by a simple traversal
+ of the (finite) symbolic execution tree.
+
+ However, for most programs, no fixed bound on the number of times each
+ loop is executed exists and the corresponding symbolic execution trees
+ are infinite. In order to prove the correctness of such programs, a
+ more general assertion structure must be provided. The symbolic
+ execution tree of such programs must be traversed inductively rather
+ than explicitly. This leads naturally to the use of additional
+ assertions which are called ``inductive assertions.''",
+ paper = "Hant76.pdf",
+ keywords = "printed",
+}
+
+\end{chunk}
+
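+A small editorial sketch of the branch case: each path through the
+program carries a path condition and a symbolic store, and the final
+assertion must hold on every path (discharging the resulting
+verification conditions is the prover's job).
+
+\begin{verbatim}
+# Symbolic execution of:  if x >= 0: y = x  else: y = -x
+# followed by the final assertion y >= 0.
+paths = [
+    ("x >= 0", {"y": "x"}),     # then-branch: condition, store
+    ("x < 0",  {"y": "-x"}),    # else-branch
+]
+for cond, store in paths:
+    print("prove: (%s) implies (%s >= 0)" % (cond, store["y"]))
+# prove: (x >= 0) implies (x >= 0)
+# prove: (x < 0) implies (-x >= 0)
+\end{verbatim}
+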
+\index{Wirth, Niklaus}
+\begin{chunk}{axiom.bib}
+@misc{Wirt15,
+ author = "Wirth, Niklaus",
+ title = {{The Design of a RISC Architecture and its Implementation
+ with an FPGA}},
+ link =
+ "\url{https://www.inf.ethz.ch/personal/wirth/FPGArelatedWork/RISC.pdf}",
+ year = "2015",
+ paper = "Wirt15.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Remy, Didier}
+\begin{chunk}{axiom.bib}
+@techreport{Remy92,
+ author = "Remy, Didier",
+ title = {{Extension of the ML type system with a sorted equation theory
+ on types}},
+ type = "research report",
+ institution = "INRIA",
+ number = "RR1766",
+ year = "1992",
+ abstract =
+ "We extend the ML language by allowing a sorted regular equational
+ theory on types for which unification is decidable and unitary. We
+ prove that the extension keeps principal typings and subject
+ reduction. A new set of typing rules is proposed so that type
+ generalization is simpler and more efficient. We consider typing
+ problems as general unification problems, which we solve with a
+ formalism of unificands. Unificands naturally deal with sharing
+ between types and lead to a more efficient type inference
+ algorithm. The use of unificands also simplifies the proof of
+ correctness of the algorithm by splitting it into more elementary
+ steps.",
+ paper = "Remy92.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Atkey, Robert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Atke18,
+ author = "Atkey, Robert",
+ title = {{Syntax and Semantics of Quantitative Type Theory}},
+ booktitle = "LICS",
+ year = "2018",
+ publisher = "ACM",
+ isbn = "9781450355834",
+ abstract =
+ "We present Quantitative Type Theory, a Type Theory that records
+ usage information for each variable in a judgement, based on a
+ previous system by McBride. The usage information is used to give
+ a realizability semantics using a variant of Linear Combinatory
+ Algebras, refining the usual realizability semantics of Type
+ Theory by accurately tracking resource behaviour. We define the
+ semantics in terms of Quantitative Categories with Families, a
+ novel extension of Categories with Families for modelling resource
+ sensitive type theories.",
+ paper = "Atke18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Gabriel, Richard P.}
+\begin{chunk}{axiom.bib}
+@misc{Gabr10,
+ author = "Gabriel, Richard P.",
+ title = {{A Review of The Art of the Metaobject Protocol}},
+ year = "2010",
+ paper = "Gabr10.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Church, Alonzo}
+\begin{chunk}{axiom.bib}
+@book{Chur41,
+ author = "Church, Alonzo",
+ title = {{The Calculi of Lambda Conversion}},
+ year = "1941",
+ publisher = "Princeton University Press",
+ paper = "Alon41*.pdf"
+}
+
+\end{chunk}
+
+\index{Chow, Timothy Y.}
+\begin{chunk}{axiom.bib}
+@misc{Chow18,
+ author = "Chow, Timothy Y.",
+ title = {{The Consistency of Arithmetic}},
+ year = "2018",
+ link = "\url{http://timothychow.net/consistent.pdf}",
+ paper = "Chow18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Torlak, Emina}
+\begin{chunk}{axiom.bib}
+@misc{Torl17,
+ author = "Torlak, Emina",
+ title = {{Synthesis and Verification for All}},
+ link = "\url{https://www.youtube.com/watch?v=KpDyuMIb_E0}",
+ year = "2017"
+}
+
+\end{chunk}
+
+\index{Burstall, R.M.}
+\begin{chunk}{axiom.bib}
+@article{Burs69,
+ author = "Burstall, R.M.",
+ title = {{Proving Properties of Programs by Structural Induction}},
+ journal = "Computer Journal",
+ volume = "12",
+ number = "1",
+ pages = "4148",
+ link = "\url{http://www.cse.chalmers.se/edu/year/2010/course/DAT140_Types/Burstall.pdf}",
+ year = "1969",
+ abstract =
+ "This paper discusses the technique of structural induction for
+ proving theorems about programs. This technique is closely related to
+ recursion induction but makes use of the inductive definition of the
+ data structures handled by the programs. It treats programs with
+ recursion but without assignments or jumps. Some syntactic extensions
+ to Landin's functional programming language ISWIM are suggested which
+ make it easier to program the manipulation of data structures and to
+ develop proofs about such programs. Two sample proofs are given to
+ demonstrate the technique, one for a tree sorting algorithm and one
+ for a simple compiler for expressions.",
+ paper = "Burs69.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
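+For lists built from $\mathit{nil}$ and $\mathit{cons}$, the rule the
+paper applies takes the familiar form (standard statement, given here
+for orientation): one premise per constructor of the data type, with
+an induction hypothesis for each recursive argument.
+\[
+\frac{P(\mathit{nil}) \qquad
+      \forall x, l.\; P(l) \Rightarrow P(\mathit{cons}(x, l))}
+     {\forall l.\; P(l)}
+\]
+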
+\index{Reynolds, John C.}
+\begin{chunk}{axiom.bib}
+@inbook{Reyn97,
+ author = "Reynolds, John C.",
+ title = {{The Essence of Algol}},
+ booktitle = "ALGOLlike languages, Volume 1",
+ publisher = "Birkhauser Boston Inc.",
+ isbn = "0817638806",
+ year = "1997",
+ abstract =
+ "Although Algol 60 has been uniquely in uential in programming
+ language design, its descendants have been signicantly dierent than
+ their prototype. In this paper, we enumerate the principles that we
+ believe embody the essence of Algol, describe a model that satises
+ these principles, and illustrate this model with a language that,
+ while more uniform and general, retains the character of Algol.",
+ paper = "Reyn97.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\begin{chunk}{axiom.bib}
+@article{Sann86,
+ author = "Sannella, Donald",
+ title = {{Extended ML: An Institution-Independent Framework for
+ Formal Program Development}},
+ booktitle = "Proc. Workshop on Category Theory and Computer Programming",
+ journal = "LNCS",
+ volume = "240",
+ pages = "364389",
+ year = "1986",
+ paper = "Sann86.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Sann87,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{On Observational Equivalence and Algebraic Specification}},
+ journal = "J. of Computer and System Sciences",
+ volume = "34",
+ pages = "150178",
+ year = "1987"
+ abstract =
+ "The properties of a simple and natural notion of observational
+ equivalence of algebras and the corresponding specification-building
+ operation are studied. We begin with a definition of observational
+ equivalence which is adequate to handle reachable algebras only, and
+ show how to extend it to cope with unreachable algebras and also how
+ it may be generalised to make sense under an arbitrary institution.
+ Behavioural equivalence is treated as an important special case of
+ observational equivalence, and its central role in program development
+ is shown by means of an example.",
+ paper = "Sann87.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Sann88,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{Specifications in an Arbitrary Institution}},
+ journal = "Information and Computation",
+ volume = "76",
+ pages = "165210",
+ year = "1988",
+ abstract =
+ "A formalism for constructing and using axiomatic specifications in an
+ arbitrary logical system is presented. This builds on the framework
+ provided by Goguen and Burstall’s work on the notion of an institution
+ as a formalisation of the concept of a logical system for writing
+ specifications. We show how to introduce free variables into the
+ sentences of an arbitrary institution and how to add quantifiers which
+ bind them. We use this foundation to define a set of primitive
+ operations for building specifications in an arbitrary institution
+ based loosely on those in the ASL kernel specification language. We
+ examine the set of operations which results when the definitions are
+ instantiated in institutions of total and partial first-order logic
+ and compare these with the operations found in existing specification
+ languages. We present proof rules which allow proofs to be conducted
+ in specifications built using the operations we define. Finally, we
+ introduce a simple mechanism for defining and applying parameterised
+ specifications and briefly discuss the program development process.",
+ paper = "Sann88.pdf"
+}
+
+\end{chunk}
+
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@misc{Sann89,
+ author = "Sannella, Donald and Tarlecki, Andrzej",
+ title = {{Toward Formal Development of ML Programs: Foundations and
+ Methodology (Extended Abstract)}},
+ year = "1989",
+ abstract =
+ "A methodology is presented for the formal development of modular
+ Standard ML programs from specifications. Program development proceeds
+ via a sequence of design (modular decomposition), coding and
+ refinement steps. For each of these three kinds of step, conditions
+ are given which ensure the correctness of the result. These conditions
+ seem to be as weak as possible under the constraint of being
+ expressible as local interface matching requirements.",
+ paper = "Sann89.pdf"
+}
+
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@article{SCSCP10,
+ author = "Unknown",
+ title = {{Symbolic Computation Software Composability Project and
+ its implementations}},
+ journal = "ACM Comm. in Computer Algebra",
+ volume = "44",
+ number = "4",
+ year = "2010",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Damas, Luis}
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@inproceedings{Dama82,
+ author = "Damas, Luis and Milner, Robin",
+ title = {{Principal Type-Schemes for Functional Programs}},
+ booktitle = "Proc POPL '82",
+ year = "1982",
+ pages = "207212",
+ paper = "Dama82.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
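+The heart of the paper is the rule for let, which introduces
+polymorphism by generalisation (notation lightly adapted):
+\[
+\frac{\Gamma \vdash e_0 : \tau_0 \qquad
+      \Gamma,\; x : \overline{\Gamma}(\tau_0) \vdash e_1 : \tau}
+     {\Gamma \vdash \mathbf{let}\ x = e_0\ \mathbf{in}\ e_1 : \tau}
+\]
+where $\overline{\Gamma}(\tau_0)$ quantifies over the type variables
+free in $\tau_0$ but not free in $\Gamma$. This is what lets each use
+of $x$ in $e_1$ receive a different instance of $x$'s type.
+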
+\index{Mobarakeh, S. Saeidi}
+\begin{chunk}{axiom.bib}
+@misc{Moba09,
+ author = "Mobarakeh, S. Saeidi",
+ title = {{Type Inference Algorithms}},
+ year = "2009",
+ link = "\url{https://www.win.tue.nl/~hzantema/semssm.pdf}",
+ abstract =
+ "n this paper we are going to describe the Wand’s type inference
+ algorithm and we’ll try to extend this algorithm with the notion of
+ polymorphic let. By means of a type system, which we’re going to
+ extend with some constraint language, we are able to extend the
+ algorithm’s first phase (generation of equations) with
+ let-polymorphism. The second phase of the algorithm (solving of the
+ generated equations) needs some modifications to be done on standard
+ unification algorithms because the new generated equation constructs
+ can not directly be fed in to the standard unification algorithm. The
+ correctness of the first phase of the algorithm has been proved by
+ extending Wand's soundness and completeness proof. Finally a
+ simple example is given to clarify the idea behind the algorithm.",
+ paper = "Moba09.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
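+In Wand's formulation the first phase assigns a type variable
+$\alpha_e$ to every subterm $e$ and emits one equation per syntax
+node; the two core cases are (standard presentation):
+\[
+\begin{array}{ll}
+e = \lambda x.\, e' : & \alpha_e = \alpha_x \rightarrow \alpha_{e'} \\
+e = e_1\ e_2 : & \alpha_{e_1} = \alpha_{e_2} \rightarrow \alpha_e
+\end{array}
+\]
+The extension discussed above adds constraints for let-bound
+variables so that each use can be instantiated separately.
+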
+\index{Kreitz, Christoph}
+\index{Rahli, Vincent}
+\begin{chunk}{axiom.bib}
+@techreport{Kreix11,
+ author = "Kreitz, Christoph and Rahli, Vincent",
+ title = {{Introduction to Classic ML}},
+ type = "technical report",
+ institution = "Cornell University",
+ year = "2011",
+ paper = "Krei11.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Lee, Daniel K.}
+\index{Crary, Karl}
+\index{Harper, Robert}
+\begin{chunk}{axiom.bib}
+@inproceedings{Leex07,
+ author = "Lee, Daniel K. and Crary, Karl and Harper, Robert",
+ title = {{Towards a Mechanized Metatheory of Standard ML}},
+ booktitle = "POPL'07",
+ publisher = "ACM",
+ year = "2007",
+ abstract =
+ "We present an internal language with equivalent expressive power to
+ Standard ML, and discuss its formalization in LF and the
+ machinechecked verification of its type safety in Twelf. The
+ internal language is intended to serve as the target of elaboration in
+ an elaborative semantics for Standard ML in the style of Harper and
+ Stone. Therefore, it includes all the programming mechanisms
+ necessary to implement Standard ML, including translucent modules,
+ abstraction, polymorphism, higher kinds, references, exceptions,
+ recursive types, and recursive functions. Our successful formalization
+ of the proof involved a careful interplay between the precise
+ formulations of the various mechanisms, and required the invention of
+ new representation and proof techniques of general interest.",
+ paper = "Leex07.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Tarditi, David}
+\index{Morrisett, Greg}
+\index{Cheng, Perry}
+\index{Stone, Chris}
+\index{Harper, Robert}
+\index{Lee, Peter}
+\begin{chunk}{axiom.bib}
+@inproceedings{Tard03,
+ author = "Tarditi, David and Morrisett, Greg and Cheng, Perry and
+ Stone, Chris and Harper, Robert and Lee, Peter",
+ title = {{TIL: A TypeDirected, Optimizing Compiler for ML}},
+ booktitle = "20 Years of Prog. Lang. Design and Implementation",
+ year = "2003",
+ publisher = "ACM",
+ isbn = "1581136234",
+ paper = "Tard03.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Herman, David}
+\begin{chunk}{axiom.bib}
+@inproceedings{Herm07,
+ author = "Herman, David",
+ title = {{Functional Pearl: The Great Escape}},
+ booktitle = "ICFP'07",
+ publisher = "ACM",
+ isbn = "1595938152",
+ year = "2007",
+ abstract =
+ "Filinski showed that callcc and a single mutable reference cell
+ are sufficient to express the delimited control operators shift
+ and reset. However, this implementation interacts poorly with
+ dynamic bindings like exception handlers. We present a variation
+ on Filinski's encoding of delimited continuations that behaves
+ appropriately in the presence of exceptions and give an
+ implementation in Standard ML of New Jersey. We prove the encoding
+ correct with respect to the semantics of delimited dynamic binding.",
+ paper = "Herm07.pdf"
+}
+
+\end{chunk}
+
+\index{Rossberg, Andreas}
+\begin{chunk}{axiom.bib}
+@misc{Ross13,
+ author = "Rossberg, Andreas",
+ title = {{HaMLet: To Be Or Not To Be Standard ML}},
+ year = "2013",
+ link = "\url{https://people.mpisws.org/~rossberg/hamlet}",
+ paper = "Ross13.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Kahrs, Stefan}
+\index{Sannella, Donald}
+\index{Tarlecki, Andrzej}
+\begin{chunk}{axiom.bib}
+@article{Kahr97,
+ author = "Kahrs, Stefan and Sannella, Donald and Tarlecki, Andrzej",
+ title = {{The Definition of Extended ML}},
+ journal = "Theoretical Computer Science",
+ volume = "173",
+ pages = "445484",
+ year = "1997",
+ abstract =
+ "This document formally defines the syntax and semantics of the
+ Extended ML language. It is based on the published semantics of
+ Standard ML in an attempt to ensure compatibility between the two
+ languages.",
+ paper = "Kahr97.pdf"
+}
+
+\end{chunk}
+
+\index{Kahrs, Stefan}
+\index{Sannella, Donald}
+\begin{chunk}{axiom.bib}
+@article{Kahr98,
+ author = "Kahrs, Stefan and Sannella, Donald",
+ title = {{Reflections on the Design of a Specification Language}},
+ journal = "LNCS",
+ volume = "1382",
+ pages = "154170",
+ abstract =
+ "We reflect on our experiences from work on the design and
+ semantic underpinnings of Extended ML, a specification language
+ which supports the specification and formal development of
+ Standard ML programs. Our aim is to isolate problems and issues
+ that are intrinsic to the general enterprise of designing a
+ specification language for use with a given programming language.
+ Consequently the lessons learned go far beyond our original aim of
+ designing a specification language for ML.",
+ paper = "Karh98.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
\ No newline at end of file
diff --git a/readme b/readme
index e0aba10..7233be0 100644
--- a/readme
+++ b/readme
@@ -21,6 +21,11 @@ When you teach, you do something useful.
When you do research, most days you don't.
  -- June Huh IAS Princeton
+Therapist: So you're afraid that you're letting down people
+you've never met and who you've given something for free?
+Me: Yeah, basically
+ -- Matthew Garrett
+
You've unpacked the Axiom source code to some directory. In this
document we'll call that directory /home/me/axiom. Note that the path
cannot contain spaces.
diff --git a/src/axiom-website/patches.html b/src/axiom-website/patches.html
index f65dd3a..b003331 100644
--- a/src/axiom-website/patches.html
+++ b/src/axiom-website/patches.html
@@ -5970,6 +5970,8 @@ books/bookvolbib add references
books/bookvolbib add references
20181003.01.tpd.patch
src/input/kernel.input add additional Waldek examples
+20181211.01.tpd.patch
+books/bookvolbib add references

1.9.1