\documentclass[10pt,twocolumn]{article} % 10pt base font, two-column article
\usepackage{latexsym} % load symbols like \Box
%\pagestyle{empty}
% Page geometry for a two-column proceedings-style layout:
% 6.875in total text width with a 0.375in gutter between the columns.
\setlength{\textwidth}{6.875in}
\setlength{\columnsep}{0.375in}
\setlength{\oddsidemargin}{-.1875in}
\setlength{\textheight}{8.875in}
% No running head: collapse the header area completely.
\setlength{\topmargin}{0in}
\setlength{\headheight}{0in}
\setlength{\headsep}{0in}
\setlength{\parindent}{12pt} % paragraph indentation
\makeatletter
% \sublarge: 11pt type on a 13pt baseline, used for subsection headings.
\newcommand\sublarge{\@setfontsize\sublarge\@xipt{13}}
% Section numbers are followed by a period and two unbreakable spaces.
\def\@seccntformat#1{\csname the#1\endcsname.~~}
% Headings: flush left (indent 0pt), bold, with tighter vertical spacing
% than the article class defaults.
\def\section{\@startsection {section}{1}{0pt}{-3.25ex plus -1ex minus
-.2ex}{1.5ex plus .2ex}{\large\bf}}
\def\subsection{\@startsection {subsection}{2}{0pt}{-2ex plus -1ex minus
-.2ex}{1.5ex plus .2ex minus .3ex}{\sublarge\bf}}
% Caption layout: bold sans-serif caption text; a caption that fits on a
% single line is centered, otherwise it is set as an ordinary paragraph.
% NOTE: the original used \sf\bf, but \bf expands to \normalfont\bfseries,
% which resets the family back to roman and silently discards the sans
% request; \sffamily\bfseries combines correctly. Trailing %'s suppress
% spurious spaces from the line ends inside the definition.
\long\def\@makecaption#1#2{%
\setbox\@tempboxa\hbox{\sffamily\bfseries #1. #2}%
\ifdim \wd\@tempboxa >\hsize % IF longer than one line:
\sffamily\bfseries #1. #2\par % THEN set as ordinary paragraph.
\else % ELSE center.
\hbox to\hsize{\hfil\box\@tempboxa\hfil}%
\fi
\vskip 8pt }
\makeatother
\frenchspacing % uniform (single) inter-sentence spacing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% Theorem-like environments %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Theorem-like counters, each numbered independently.
\newtheorem{defi}{Definition}
\newtheorem{theo}{Theorem}
\newtheorem{prop}{Proposition}
\newtheorem{lemm}{Lemma}
\newtheorem{coro}{Corollary}
\newtheorem{exam}{Example}
% Wrappers around the counters above: the mandatory argument is a label
% suffix, so \begin{definition}{foo} attaches \label{df-foo}, matching
% the \df, \thm, ... reference macros defined below. Bodies are upright.
\newenvironment{definition}[1]{\begin{defi} \rm \label{df-#1} }{\end{defi}}
\newenvironment{theorem}[1]{\begin{theo} \rm \label{thm-#1} }{\end{theo}}
\newenvironment{proposition}[1]{\begin{prop} \rm \label{pr-#1} }{\end{prop}}
\newenvironment{lemma}[1]{\begin{lemm} \rm \label{lem-#1} }{\end{lemm}}
\newenvironment{corollary}[1]{\begin{coro} \rm \label{cor-#1} }{\end{coro}}
\newenvironment{example}[1]{\begin{exam} \rm \label{ex-#1} }{\end{exam}}
% Proof environment: bold "Proof:" label, closed by a right-flushed box.
\newenvironment{proof}{\begin{trivlist} \item[\hspace{\labelsep}\bf Proof:]}{\hfill $\Box$\end{trivlist}}
% Three custom bullet-list variants with progressively tighter left
% margins and vertical spacing:
%   itemise  - $\bullet$ label, 18pt margin
%   itemise2 - bold dash label,  15pt margin
%   itemise3 - asterisk label,   12pt margin, no extra vertical space
\newenvironment{itemise}{\begin{list}{$\bullet$}{\leftmargin 18pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 4pt \itemsep 2pt \parsep 2pt}}{\end{list}}
\newenvironment{itemise2}{\begin{list}{{\bf --}}{\leftmargin 15pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}}{\end{list}}
\newenvironment{itemise3}{\begin{list}{*}{\leftmargin 12pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 0pt \itemsep 0pt \parsep 0pt}}{\end{list}}
% Reference shorthands matching the labelling environments above,
% e.g. \df{foo} expands to "Definition~\ref{df-foo}".
\newcommand{\df}[1]{Definition~\ref{df-#1}}
\newcommand{\thm}[1]{Theorem~\ref{thm-#1}}
\newcommand{\pr}[1]{Proposition~\ref{pr-#1}}
\newcommand{\lem}[1]{Lemma~\ref{lem-#1}}
\newcommand{\cor}[1]{Corollary~\ref{cor-#1}}
\newcommand{\ex}[1]{Example~\ref{ex-#1}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Hand-built notation symbols. Most of these rely on precise negative
% \hspace amounts and raiseboxes; do not reformat casually.
\newcommand{\dcup}{\stackrel{\mbox{\huge .}}{\cup}} % disjoint union
\newcommand{\dl}[1]{\mbox{\rm I\hspace{-0.75mm}#1}} % openface letter
\newcommand{\dc}[1]{\mbox{\rm {\raisebox{.4ex}{\makebox % openface character
[0pt][l]{\hspace{.2em}\scriptsize $\mid$}}}#1}}
\newcommand{\IT}{\mbox{\sf T\hspace{-5.5pt}T}} % openface T (terms)
\newcommand{\plat}[1]{\raisebox{0pt}[0pt][0pt]{$#1$}} % no vertical space
\newcommand{\Id}[1]{[\hspace{-1.4pt}[#1]\hspace{-1.2pt}]} % denotation
\newcommand{\In}[1]{\plat{ % notation
\stackrel{\mbox{\tiny $/\hspace{-2.1pt}/$}}
{\raisebox{-.3ex}[.3ex]{\tiny $\backslash
\hspace{-2.1pt}\backslash$}}\!\!#1\!\!
\stackrel{\mbox{\tiny $\backslash\hspace{-1.8pt}\backslash$}}
{\raisebox{-.3ex}[.3ex]{\tiny $/\hspace{-1.8pt}/$}} }}
\newcommand{\rec}[1]{\plat{ % recursion
\stackrel{\mbox{\tiny $/$}}
{\raisebox{-.3ex}[.3ex]{\tiny $\backslash$}}
\!\!#1\!\!
\stackrel{\mbox{\tiny $\backslash$}}
{\raisebox{-.3ex}[.3ex]{\tiny $/$}} }}
\newcommand{\goto}[1]{\stackrel{#1}{\longrightarrow}} % transition
\newcommand{\gonotto}[1]{\hspace{4pt}\not\hspace{-4pt} % no transition
\stackrel{#1\ }{\longrightarrow}}
% stacked right-over-left arrows (not used in this chunk; presumably a
% (bi)simulation-style relation symbol -- confirm against the body)
\newcommand{\si}{\stackrel{\rightarrow}{\raisebox{0pt}[1pt][0pt]{$\scriptstyle \leftarrow$}}}
\newcommand{\bis}[1]{ \; % bisimulation
\raisebox{.3ex}{$\underline{\makebox[.7em]{$\leftrightarrow$}}$}
\,_{#1}\,}
\newcommand{\nobis}[1]{\mbox{$\,\not\hspace{-2.5pt} % no bisimulation
\raisebox{.3ex}{$\underline{\makebox[.7em]{$\leftrightarrow$}}$}
\,_{#1}\,$}}
\newcommand{\pf}{\noindent {\bf Proof:\ }} % beginning of proof
\newcommand{\rest}{\unitlength 1mm % restriction operator
\begin{picture}(2.16,3.2)
\thinlines
\put(1.12,-0.48){\line(0,1){3.52}}
\put(0.8,1.6){\tiny $\backslash$}
\end{picture} }
\newcommand{\var}{{\it var}} % variables in a term
\newcommand{\Rule}[2]{ % transition rule
\frac{\raisebox{.7ex}{\normalsize{$#1$}}}
{\raisebox{-1.0ex}{\normalsize{$#2$}}}}
% Shorthand letters for frequently used sets. The obsolete plain-TeX
% form {\cal X} is replaced by the LaTeX2e idiom \mathcal{X}, which is
% properly scoped to its argument; the output is identical.
\newcommand{\pow}{\mathcal{P}} % calligraphic P (presumably powerset -- not used in this chunk)
\newcommand{\IN}{\dl{N}} % openface N: the natural numbers (arity codomain)
\newcommand{\IO}{\dc{O}} % openface O: the set of potential observations
\newcommand{\fO}{\mathcal{O}} % calligraphic O: observation sets \fO_N(p)
\newcommand{\IP}{\dl{P}} % openface P
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\title{\Large\bf Precongruence Formats for Decorated Trace Preorders}
\author{Bard Bloom\\
\small \sl IBM T.J.\ Watson Research Center\\[-3pt]
\small \sl Hawthorne, NY 10532, USA\\[-3pt]
\small \tt bardb@us.ibm.com
\and Wan Fokkink\thanks{Supported
by a grant from The Nuffield Foundation.}\\
\small \sl CWI\\[-3pt]
\small \sl PO Box 94079\\[-3pt]
\small \sl 1090 GB Amsterdam, NL\\[-3pt]
\small \tt wan@cwi.nl
\and Rob van Glabbeek\\
\small \sl Computer Science Department\\[-3pt]
\small \sl Stanford University\\[-3pt]
\small \sl Stanford, CA 94305-9045, USA\\[-3pt]
\small \tt rvg@cs.stanford.edu}
\date{}
\maketitle
%\thispagestyle{empty}
{\centering\large\bf Abstract\\}\noindent
{\em This paper explores the connection between semantic equivalences and
preorders for concrete sequential processes, represented by means of
labelled transition systems, and formats of transition system
specifications using Plotkin's structural approach. For several
preorders in the linear time -- branching time spectrum a format
is given, as general as possible, such that this preorder is a
precongruence for all operators specifiable in that format.
The formats are derived using the modal characterizations of the
corresponding preorders.}
\section{Introduction}
{\em Structural operational semantics} \cite{Pl81} provides process
algebras and specification languages with an interpretation. It
generates a {\em (labelled) transition system} in which states
are the closed terms over a (single-sorted, first-order) signature,
and transitions between states may be supplied with labels. The
transition relation is obtained from a {\em transition system
specification} (TSS), this being a set of proof rules called {\em
transition rules}.
In case of a TSS with only positive premises, the associated transition relation simply
consists of the transitions derivable from the transition rules. In
the presence of negative premises it is not always straightforward to
associate a transition relation to a TSS. One can for instance express that a
transition holds if it does not hold. In {\sc van Glabbeek}
\cite{vG95} the notion of derivability of transitions from a TSS is
extended to negated transitions by incorporating a notion of {\em
negation as failure} (cf.\ \cite{Cl78}): a {\em well-supported proof}
features a method to derive the negation of a
transition by demonstrating that that transition cannot be derived.
A TSS is {\em complete} \cite{vG95} if for each transition
there is a well-supported proof from the TSS either for the transition
itself or for its negation. The transition relation associated
to a complete TSS consists of the transitions for which there is a
well-supported proof. An incomplete TSS arguably does not specify
a transition relation in a meaningful way
at all. However, it specifies what one could call a {\em 3-valued}
transition relation, in which potential transitions are either present, absent
or unknown. This approach to the meaning of transition system
specifications can be seen as a proof-theoretic characterization of the
work of {\sc van Gelder, Ross \& Schlipf} \cite{GRS91} in logic
programming.
A wide range of semantic equivalences and preorders has been defined
on 2-valued transition relations. These preorders are
based on the branching structure of processes
(ready simulation \cite{BIM95}, bisimulation \cite{Mi89}),
on execution sequences (partial traces, completed traces, accepting traces),
or on decorated versions of execution sequences
(ready pairs \cite{OlHo86}, failure pairs \cite{BHR84,DH84},
ready traces \cite{BBK87b,Pnu85}, failure traces \cite{Phi87}).
In \cite{vG90a}, {\sc van Glabbeek} classified most equivalences and
preorders for concrete, sequential processes\footnote{A process is
{\em sequential} if it can do only one action at a time; {\em
concrete} refers to the absence of internal actions or internal
choice.} that occur in the literature, and motivated them
by means of testing scenarios, phrased in terms of
`button pushing experiments' on generative and reactive machines.
This gave rise to {\em modal characterizations} of the preorders,
i.e.\ characterizations in terms of the observations that an
experimenter could make during a session with a process.
These will here be taken as the definitions of the preorders.
In general a semantic equivalence (or preorder) induced
by a TSS is not a congruence (resp.\ precongruence), i.e.\ the equivalence
class of a term $f(t_1,\ldots,t_n)$ need not be determined by the
equivalence classes of its arguments $t_1,\ldots,t_n$. Being a
(pre)congruence is an important property, for instance in order
to fit the equivalence (or preorder) into an axiomatic framework.
Syntactic formats for TSSs have been developed w.r.t.\ several
semantic equivalences and preorders, to ensure that such an equivalence or
preorder as induced by a TSS in the corresponding format is a (pre)congruence.
These formats have helped to avoid repetitive (pre)congruence
proofs, and to explore the limits of sensible TSS definitions.
A first congruence format for bisimulation equivalence was put
forward by {\sc de Simone} \cite{dS85}, which was extended to the
GSOS format by {\sc Bloom, Istrail \& Meyer} \cite{BIM95} and to the
tyft/tyxt format by {\sc Groote \& Vaandrager} \cite{GrV92}.
(The latter format was supplied with a well-foundedness criterion,
which was subsequently eliminated \cite{FvG96}.)
The tyft/tyxt format was extended with negative premises
\cite{BolG96,Gr93} to obtain the so-called ntyft/ntyxt format,
which works for the class of complete TSSs. The ntyft/ntyxt format
generalizes the GSOS format.
To mention some formats for other equivalences and preorders,
{\sc Vaandrager} \cite{Va91a} observed that de Simone's format is a
precongruence format for the partial trace and the failure preorders,
{\sc Bloom} \cite{Bl94} introduced a more general congruence format
for partial trace equivalence,
and {\sc Fokkink} \cite{Fok99a} introduced a precongruence format
for the accepting trace preorder. Finally, {\sc van Glabbeek}
\cite{vG93a} introduced a congruence format for ready simulation
equivalence, which generalizes the GSOS format.
In this paper precongruence formats are proposed for several
semantic preorders based on decorated traces, building on results
reported in \cite{Bl93,vG93a}.
We introduce precongruence formats for the ready trace preorder,
the readiness preorder, the failure trace preorder and
the failure preorder. The precongruence formats for the last two
preorders coincide.
Following \cite{Bl93,Fok99a} these three precongruence formats
distinguish between `frozen' and `liquid' arguments of function symbols.
This distinction is used in posing restrictions on occurrences
of variables in transition
rules. The ready simulation format of \cite{vG93a} is more liberal than the
format for the ready trace preorder, which is more liberal than the format
for the readiness preorder, which is more liberal than the format for
the failure trace preorder, which in turn is more liberal than de Simone's format.
The three precongruence formats introduced in this paper apply to
incomplete TSSs as well. For this purpose the definitions of the
corresponding preorders are extended to 3-valued transition relations.
The precongruence formats put forward in this paper
were obtained by a careful study of the
modal characterizations of the preorders in question.
The outline of the proof underlying each of our precongruence results
is as follows (detailed proofs can be found in \cite{BFG00}).
First, any TSS in the ready simulation format is
transformed into an equivalent TSS---equivalent in the sense that it
proves the same transitions and negated transitions---of a special
form, in which the left-hand sides of positive premises are single
variables. Next, any such TSS is
extended with a number of transition rules with negative conclusions,
in such a way that a (negated) transition has a well-supported proof from
the original TSS if and only if it has a standard proof from the extended
TSS\@. In the extended TSS, the left-hand sides of positive and negative
premises can further be reduced to single variables.
It is shown for each of the precongruence formats in this paper
that its syntactic criteria are preserved under these transformations.
Finally, the resulting transition rules are captured by means of
logical formulas that are within the modal characterization of the
preorder in question. This implies the desired precongruence result.
This paper is set up as follows.
Section \ref{sec:preliminaries} presents the basics of structural operational
semantics and defines the preorders based on decorated traces
w.r.t.\ 3-valued transition relations. Section \ref{sec:formats} recalls
the ready simulation format of \cite{vG93a} and formulates extra requirements
to obtain new precongruence formats for the decorated trace preorders.
Section \ref{sec:sketch} sketches the proofs of the precongruence results,
using the definitions of preorders in terms of observations.
Section \ref{sec:fullabstraction} contains full abstraction
properties of the various precongruence formats: for each format we
determine the coarsest congruence with respect to all operators in
that format that is finer than partial or completed trace equivalence.
In Section \ref{sec:counterexamples}, counterexamples are given to show
that all syntactic restrictions are essential for the obtained
precongruence results.
Finally, Section \ref{sect:applications} presents some
applications of the precongruence formats to TSSs from the literature.
The reader is referred to \cite{BFG00} for detailed proofs and more
applications.
\section{Preliminaries}
\label{sec:preliminaries}
$V$ and $A$ are two sets of {\em variables} and {\em
actions}. In our proofs (see \cite{BFG00}) it is used that
$V$ is infinite and at least as large as $A$ (i.e.\ $|V| \geq |A|$).
Many concepts that will appear later on are parameterized by the
choice of $V$ and $A$, but as in this paper this choice is fixed, a
corresponding index is suppressed. A syntactic object is
{\em closed} if it does not contain any variables from $V$.
\begin{definition}{signature}
A {\em signature} is a collection $\Sigma$ of {\em function symbols}
$f \not\in V$, with $|\Sigma| \leq |V|$, equipped with a function $ar:
\Sigma \rightarrow \IN$.
The set $\IT(\Sigma)$ of {\em terms} over a signature $\Sigma$ is
defined recursively by:
\begin{itemise}
\item $V \subseteq \IT(\Sigma)$,
\item $f(t_1,\ldots,t_{ar(f)}) \in \IT(\Sigma)$ for all function symbols
$f \in \Sigma$ and terms $t_1,\ldots,t_{ar(f)} \in \IT(\Sigma)$.
\end{itemise}
A term $c()$ is abbreviated as $c$. For $t\in \IT(\Sigma)$,
$\var(t)$ denotes the set of variables that occur in $t$. \linebreak[3]
$T(\Sigma)$ is the set of closed terms over $\Sigma$, i.e.\
the terms $t \in \IT(\Sigma)$ with $\var(t)=\emptyset$.
A {\em $\Sigma$-substitution} $\sigma$ is a partial function from $V$
to $\IT(\Sigma)$. If $\sigma$ is a substitution and $S$ is any syntactic
object, then $\sigma(S)$ denotes the object obtained from $S$ by
replacing, for $x$ in the domain of $\sigma$, every occurrence of $x$
in $S$ by $\sigma(x)$. In that case $\sigma(S)$ is called a {\em
substitution instance} of $S$. A $\Sigma$-substitution is {\em closed} if
it is a total function from $V$ to $T(\Sigma)$.
\end{definition}
\begin{definition}{TSS}
Let $\Sigma$ be a signature. A {\em positive $\Sigma$-literal} is an
expression $t \goto{a} t'$ and a {\em negative $\Sigma$-literal} an
expression $t \gonotto{a}$ with $t,t'\in \IT(\Sigma)$ and $a \in
A$. For $t,t' \in \IT(\Sigma)$ the literals $t \goto{a} t'$ and $t
\gonotto{a}$ are said to {\em deny} each other. A {\em transition
rule} over $\Sigma$ is an expression of the form $\frac{H}{\alpha}$
with $H$ a set of $\Sigma$-literals (the {\em premises} of the
rule) and $\alpha$ a $\Sigma$-literal (the {\em conclusion}). The
left- and right-hand side (if any) of $\alpha$ are called the {\em
source} and the {\em target} of the rule, respectively. A rule
$\frac{H}{\alpha}$ with $H=\emptyset$ is also written $\alpha$.
A {\em transition system specification (TSS)} over $\Sigma$
is a collection of transition rules over $\Sigma$. A TSS is {\em
standard} if all its rules have positive conclusions, and {\em
positive} if moreover all premises of its rules are positive.
\end{definition}
The concept of a positive TSS was introduced by {\sc Groote \&
Vaandrager} \cite{GrV92}; negative premises were added by {\sc Groote}
\cite{Gr93}. The resulting notion constitutes the first formalization
of {\sc Plotkin}'s {\em structural operational semantics (SOS)} \cite{Pl81}
that is sufficiently general to cover most of its applications.
TSSs with negative conclusions are introduced here, because they are needed
as intermediate steps in our proofs for standard TSSs.
The following definition tells when a literal is provable from a
TSS\@. It generalizes the standard definition (see e.g.\ \cite{GrV92})
by allowing the derivation of transition rules. The
derivation of a literal $\alpha$ corresponds to the derivation
of the transition rule $\frac{H}{\alpha}$ with $H=\emptyset$.
The case $H \neq \emptyset$ corresponds to the derivation of
$\alpha$ under the assumptions $H$.
\begin{definition}{proof}
Let $R$ be a TSS over a signature $\Sigma$. An {\em irredundant proof}
of a transition rule $\frac{H}{\alpha}$ from $R$ is a well-founded, upwardly
branching tree of which the nodes are labelled by $\Sigma$-literals,
and some of the leaves are marked ``hypothesis'', such that:
\begin{itemise}
\item the root is labelled by $\alpha$,
\item $H$ is the set of labels of the hypotheses, and
\item if $\beta$ is the label of a node $q$ which is not an hypothesis
and $K$ is the set of labels of the nodes directly above $q$, then
$\frac{K}{\beta}$ is a substitution instance of a transition rule in $R$.
\end{itemise}
A {\em proof} of $\frac{K}{\alpha}$ from $R$ is an
irredundant proof of $\frac{H}{\alpha}$ from $R$ with $H\subseteq K$.
If an (irredundant) proof of $\frac{K}{\alpha}$ from $R$ exists, then
$\frac{K}{\alpha}$ is {\em (irredundantly) provable} from $R$,
notation $R \vdash \frac{K}{\alpha}$ (resp.\ $R \vdash_{\rm irr}
\frac{K}{\alpha}$).
\end{definition}
The main purpose of a transition system specification (over a
signature $\Sigma$) is to specify a transition relation (over
$\Sigma$). Here a {\em transition relation} over $\Sigma$ can be
defined as a set of closed positive $\Sigma$-literals ({\em
transitions}). A positive TSS specifies a transition relation in a
straightforward way as the set of all provable transitions. But as
pointed out by {\sc Groote} \cite{Gr93}, it is much less trivial to
associate a transition relation to a TSS with negative premises; in
particular there are TSSs that appear not to specify a transition
relation in a meaningful way at all. In {\sc van Glabbeek} \cite{vG95}
eleven answers to the questions ``{\em Which TSSs are meaningful, and
which transition relations do they specify?}'' are reviewed. The
``most general solution without undesirable properties'' is due to
{\sc van Gelder, Ross \& Schlipf} \cite{GRS91} in the setting of logic
programming, and has been adapted to TSSs by {\sc Bol \& Groote}
\cite{BolG96}. In \cite{vG95} it has been reformulated in terms of
completeness w.r.t.\ a notion of provability of closed
literals that incorporates a form of {\em negation as failure}.
\begin{definition}{wsp}
Let $R$ be a standard TSS over a signature $\Sigma$. A {\em
well-supported proof} of a closed literal ${\alpha}$ from $R$ is a
well-founded, upwardly branching tree of which the nodes are labelled
by closed $\Sigma$-literals, such that:
\begin{itemize}\vspace{-1ex}
\item the root is labelled by $\alpha$, and\vspace{-1ex}
\item if $\beta$ is the label of a node $q$ and $K$ is the set of
labels of the nodes directly above $q$, then
\begin{enumerate}\vspace{-1ex}
\item either $\frac{K}{\beta}$ is a closed substitution instance of a
transition rule in $R$
\vspace{-1ex}
\item or $\beta$ is negative and for every set $N$ of negative
closed literals such that $R \vdash \frac{N}{\gamma}$ for $\gamma$ a closed
literal denying $\beta$, a literal in $K$ denies one in $N$.\vspace{-1ex}
\end{enumerate}
\end{itemize}
$\alpha$ is {\em $ws$-provable} from $R$, notation $R\vdash_{ws}
\alpha$, if a well-supported proof of $\alpha$ from $R$ exists.
\end{definition}
Note that the proof-steps 1 and 2 establish the validity of $\beta$
when $K$ is the set of literals established earlier. Step 2 allows one to
infer $t\gonotto{a}$ whenever it is manifestly impossible to infer
$t\goto{a}t'$ for some term $t'$ (because every conceivable proof of
$t\goto{a}t'$ involves a premise that has already been refuted).
This practice is sometimes referred to as {\em negation as failure}
\cite{Cl78}.
\begin{definition}{complete}
A standard TSS $R$ is {\em complete} if for any closed literal
$t\gonotto{a}$ either $R\vdash_{ws} t\goto{a}t'$ for some closed term
$t'$ or $R\vdash_{ws} t\gonotto{a}$.
\end{definition}
Now a standard TSS is meaningful, in the sense that it specifies a
transition relation, iff it is complete. The specified transition
relation is then the set of all $ws$-provable transitions.
In the present paper this solution is extended by considering all
standard TSSs to be meaningful. However, following {\sc van Gelder,
Ross \& Schlipf} \cite{GRS91}, the meaning of an incomplete TSS is now
not given by a two-valued transition relation as defined above, but
by a three-valued transition relation, in which a potential transition
can be {\em true}, {\em false} or {\em unknown}. In fact, a slight
abstraction of this notion will suffice, in which a transition
relation is simply defined as a set of closed, positive and negative,
literals.
\begin{definition}{transition}
Let $\Sigma$ be a signature. A {\em (3-valued) transition relation}
over $\Sigma$ is a set of closed $\Sigma$-literals, not containing
literals that deny each other. A transition relation $\rightarrow$ is {\em
2-valued} if it satisfies
$$(t \gonotto{a}) \in \;\rightarrow ~ \Leftrightarrow
~\forall t'\in T(\Sigma) : (t \goto{a} t') \not\in \;\rightarrow.$$
The transition relation {\em associated} to a standard TSS over
$\Sigma$ is the set of closed $\Sigma$-literals that are $ws$-provable
from that TSS.
\end{definition}
In \cite{vG95} it has been shown that $\vdash_{ws}$ is consistent, in
the sense that no standard TSS admits well-supported proofs of two
literals that deny each other. Thus the transition relation associated
to a standard TSS is indeed a transition relation as defined above.
Note that if a standard TSS $R$ is complete, its associated transition
relation is 2-valued. This means that the negative literals in
its associated transition relation are completely determined by the
positive ones; hence the transition relation can be simply given by
its positive part.
In the literature several preorders have been defined in terms of
(2-valued) transition relations, and provided with modal
characterizations. Below we employ these modal characterizations
as the definitions of these preorders, and simultaneously extend
these definitions to 3-valued transition relations.
\begin{definition}{potential-observations}
Assume an action set $A$.
The set $\IO$ of {\em potential observations} or {\em modal formulas}
is defined inductively by:
\begin{list}{}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item [$\top \in \IO.$] The trivial observation, obtained by
terminating the session.
\item [$a\varphi \in \IO$] if $\varphi \in \IO$ and $a \in A$.
The observation of an action $a$, followed by the observation $\varphi$.
\item [$\widetilde{a} \in \IO$] for $a \in A$.
The observation that the process cannot perform the action $a$.
\item [$\bigwedge_{i\in I}\varphi_i \in \IO$] if $\varphi_i \in \IO$
for all $i\in I$. The process admits each of the observations
$\varphi_i$.
\end{list}
\end{definition}
\begin{definition}{preorders}
Let $R$ be a standard TSS over a signature $\Sigma$. The {\em satisfaction
relation} $\models_R\; \subseteq T(\Sigma) \times \IO$, telling which
observations are possible for which process, is
inductively defined by the clauses below (in which $p,q \in
T(\Sigma)$).\vspace{-3pt}
$${\renewcommand{\arraystretch}{1.1}\arraycolsep 2pt \begin{array}{@{}llll@{}}
p \models_R \top \\
p \models_R a\varphi & \mbox{if} & \exists q: R \vdash_{ws} p \goto{a} q
\wedge q \models_R \varphi \\
p \models_R \widetilde{a} & \mbox{if} & R \vdash_{ws} p \gonotto{a}\\
p \models_R \bigwedge_{i\in I}\varphi_i& \mbox{if}&p \models_R \varphi_i
\mbox{ for all } i\in I\\
\end{array}}$$
\end{definition}
We will use the binary conjunction $\varphi_1 \wedge \varphi_2$ as an
abbreviation of $\bigwedge_{i \in \{1,2\}} \varphi_i$, whereas $\top$
is identified with the empty conjunction. We identify formulas that are
logically equivalent using the laws for conjunction $\top \wedge \varphi
\cong \varphi$ and $\bigwedge_{i \in I}(\bigwedge_{j \in J_i} a_{ij})
\cong \bigwedge_{k \in K}a_k$ where $K=\{ij \mid i \in I \wedge j
\in J_i\}$. This is justified because $\varphi \cong \psi$ implies $p
\models_R \varphi \Leftrightarrow p \models_R \psi$.
\begin{definition}{sublanguages}
Below, several sublanguages of the set $\IO$ of observations are defined.
\begin{list}{}{\renewcommand{\makelabel}[1]{#1\hfill}
\leftmargin 30pt \labelwidth\leftmargin\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item [$\IO_{T}$] $\varphi ::= \top \mid a\varphi'$
\hfill {\em (partial) trace} observations
\item [$\IO_{CT}$] $\varphi ::= \top \mid a\varphi'
\mid \bigwedge_{a \in A} \widetilde{a}$ \\
\hspace*{1cm} \hfill {\em completed trace} observations
\item [$\IO_F$] $\varphi ::= \top \mid a\varphi'
\mid \bigwedge_{i\in I} \widetilde{a_i}$
\hfill {\em failure} observations
\item [$\IO_R$] $\varphi ::= \top \mid a\varphi'
\mid \bigwedge_{i\in I} \widetilde{a_i}
\wedge \bigwedge_{j\in J} b_j \top$ \\
\hspace*{1cm} \hfill {\em readiness} observations
\item [$\IO_{FT}$] $\varphi ::= \top \mid a\varphi'
\mid \bigwedge_{i\in I} \widetilde{a_i}\wedge\varphi'$ \\
\hspace*{1cm} \hfill {\em failure trace} observations
\item [$\IO_{RT}$] $\varphi ::= \top \mid a\varphi'
\mid \bigwedge_{i\in I} \widetilde{a_i}
\wedge \bigwedge_{j\in J} b_j \top\wedge\varphi'$ \\
\hspace*{1cm} \hfill {\em ready trace} observations
\end{list}
\end{definition}
For each of these notions the {\em $N$-observations
of $p\in T(\Sigma)$} are given by $\fO_N^R(p) := \{\varphi \in \IO_N
\mid p \models_R \varphi\}$. The {\em $N$-preorder induced by $R$} is
defined by $p \sqsubseteq_N^R q$ if $\fO_N^R(p) \subseteq \fO_N^R(q)$.
When clear from the context, the superscript $R$ will be omitted.
In fact a slight reformulation of this definition will be needed in this
paper.
\begin{definition}{conjunctive sublanguages}
For $N\in\{T,CT,F,R,FT,RT\}$ let $\IO_N^\wedge$ consist of all formulas
$\bigwedge_{i \in I} \varphi_i$ with $\varphi_i \in \IO_N$.
Let $\fO_N^\wedge(p) := \{\varphi \in \IO_N^\wedge \mid p \models \varphi\}$.
\end{definition}
Clearly $\fO_N (p) \subseteq \fO_N (q) \Leftrightarrow
\fO_N^\wedge (p) \subseteq \fO_N^\wedge (q)$.
\begin{proposition}{modal}
For $N\in\{T,CT,F,R,FT,RT\}$ we have $p \sqsubseteq_N q$ iff
$\fO_N^\wedge (p) \subseteq \fO_N^\wedge (q)$.
\end{proposition}
\begin{definition}{precongruence}
Let $\Sigma$ be a signature. A preorder $\sqsubseteq$ on $T(\Sigma)$
is a {\em precongruence} if for all $f \in \Sigma$
\vspace{-5pt}
$$\begin{array}{cl}
& p_i \sqsubseteq q_i \mbox{ for } i=1,\ldots,ar(f) \\
\Rightarrow &
f(p_1,\ldots,p_{ar(f)}) \sqsubseteq f(q_1,\ldots,q_{ar(f)}).
\end{array}$$
\end{definition}
This is equivalent to the requirement that for all $t \in \IT(\Sigma)$
and closed substitutions $\sigma, \sigma': V \rightarrow T(\Sigma)$
\pagebreak
\[
\sigma(x) \sqsubseteq \sigma'(x) \mbox{ for } x\in \var(t)
~\Rightarrow~ \sigma(t) \sqsubseteq \sigma'(t).
\]
For every preorder $\sqsubseteq_N$ defined above there exists an
associated equivalence $=_N$ (the {\em kernel} of $\sqsubseteq_N$)
given by $p =_N q$ iff $p \sqsubseteq_N q \wedge q \sqsubseteq_N p$.
Obviously $p =_N q$ iff $\fO_N(p) = \fO_N(q)$ iff
$\fO_N^\wedge(p) = \fO_N^\wedge(q)$.
In case a relation is an equivalence as well as a
precongruence, it is called a {\em congruence}. Note that if
$\sqsubseteq_N$ is a precongruence, its kernel is a congruence.
Thus by establishing precongruence results for the preorders
$\sqsubseteq_N$, we also obtain congruence results for the associated
equivalences $=_N$.
\section{Precongruence formats}
\label{sec:formats}
In this section we define the formats for TSSs that play a
r\^ole in this paper and state the precongruence results that we
have established.
\begin{definition}{ntyft-ntyxt}
An {\em ntytt rule} is a transition rule in which
the right-hand sides of positive premises are variables that are all
distinct, and that do not occur in the source.
An ntytt rule is an {\em ntyxt rule}
if its source is a variable, and an {\em ntyft rule} if its
source contains exactly one function symbol and no multiple
occurrences of variables.
An ntytt rule (resp.\ ntyft rule) is an {\em nxytt rule} (resp.\ {\em
nxyft rule}) if the left-hand sides of its
premises are variables.
\end{definition}
\begin{definition}{free}
A transition rule has {\em no lookahead} if the variables occurring
in the right-hand sides of its positive premises do not occur in
the left-hand sides of its premises.
A variable occurring in a transition rule is {\em free} if it
does not occur in the source nor in the right-hand sides of
the positive premises of this rule.
We say that a transition rule is {\em decent} if it has no lookahead and
does not contain free variables.
\end{definition}
Each combination of syntactic restrictions on transition rules
induces a corresponding syntactic format for TSSs of the same name.
For instance, a TSS is in {\em decent ntyft} format if it consists
of decent ntyft rules. We proceed to define further syntactic
formats for TSSs.
\begin{definition}{formats-old}
A TSS is in {\em ntyft/ntyxt format}
if it contains only ntyft and ntyxt rules.
A TSS is in {\em ready simulation format} if it is in
ntyft/ntyxt format and its transition rules have no lookahead.
\end{definition}
\begin{definition}{propagated}
An occurrence of a variable in an ntytt rule is {\em propagated}
if the occurrence is either in the target, or in the left-hand side of
a positive premise whose right-hand side occurs in the target.
An occurrence of a variable in an ntytt rule is {\em polled} if the
occurrence is in the left-hand side of a premise that does not have
a right-hand side occurring in the target.
\end{definition}
Consider for instance the transition rules of \ex{ex2} in Section
\ref{sec:counterexamples}. In the second rule both occurrences of $x$
in the premises are propagated, i.e.\ the variable $x$ is propagated
twice. In the third rule the variables $x_1$ and $x_2$ are polled
once each. We can think of a process, represented by a variable in a
transition rule, as being {\em copied} if the variable is propagated
more than once. The process is {\em tested} if the variable is either
propagated or polled.
Our precongruence formats operate by keeping track of which variables
represent running processes, and which do not. For example, it is
semantically reasonable to copy a process before it starts,
effectively getting information about all the conjuncts in a $\phi \in
\IO^\wedge$.
However, copying a running process would give information about the
branching structure of the process, which is incompatible with any
form of decorated trace semantics.
We introduce a predicate $\Lambda$ as the basis for determining the
$\Lambda$-{\em floating} variables, which represent processes that may
be running.
\begin{definition}{floating}
Let $\Sigma$ be a signature, and $\Lambda$ a unary predicate on $\{(f,i)
\mid 1 \leq i \leq ar(f),~f \in \Sigma \}$. If $\Lambda(f,i)$, then
we say that argument $i$ of $f$ is {\em liquid}; otherwise it is {\em frozen}.
An occurrence of a variable $x$ in a term $t \in \IT(\Sigma)$ is
{\em $\Lambda$-liquid} if either $t=x$, or $t=f(t_1,\ldots,t_{ar(f)})$
and the occurrence of $x$ is $\Lambda$-liquid in $t_i$ for some liquid
argument $i$ of $f$.
A variable in an ntytt rule over $\Sigma$ is {\em $\Lambda$-floating}
if either it occurs as the right-hand side of a positive premise,
or it occurs exactly once in the source, at a $\Lambda$-liquid position.
\end{definition}
Note that an occurrence of a variable $x$ in a term $t \in \IT(\Sigma)$
is $\Lambda$-liquid iff $t$ does not contain a subterm
$f(t_1,\ldots,t_{ar(f)})$ such that the occurrence of $x$ is in $t_i$
for a frozen argument $i$ of $f$.
\begin{definition}{formats}
Let $\Lambda$ be a unary predicate on arguments of function symbols.
A standard ntytt rule is {\em $\Lambda$-ready trace safe} if
\begin{itemise}
\item it has no lookahead, and
\item each $\Lambda$-floating variable is propagated at most once,
and at a $\Lambda$-liquid position.
\end{itemise}
The rule is {\em $\Lambda$-readiness safe} if
\begin{itemise}
\item it is $\Lambda$-ready trace safe, and
\item each $\Lambda$-floating variable is not both propagated and polled.
\end{itemise}
The rule is {\em $\Lambda$-failure trace safe} if
\begin{itemise}
\item it is $\Lambda$-readiness safe, and
\item each $\Lambda$-floating variable is polled at most once,
at a $\Lambda$-liquid position in a positive premise.
\end{itemise}
\end{definition}
The second restriction on ``$\Lambda$-ready trace safe'' guarantees that
a running process is never copied, and continues to be
marked as running after it has executed. The ``$\Lambda$-readiness safe''
restriction ensures that a running process is tested multiple times
only at the end of its execution.
The ``$\Lambda$-failure trace safe'' restriction further
limits such testing to a positive test on a single action.
\begin{definition}{ready-trace}
A standard TSS is in {\em ready trace format} if it is in ntyft/ntyxt format
and its rules are $\Lambda$-ready trace safe w.r.t.\ some $\Lambda$.
A standard TSS is in {\em readiness format} if it is in ntyft/ntyxt format
and its rules are $\Lambda$-readiness safe w.r.t.\ some $\Lambda$.
A standard TSS is in {\em failure trace format} if it is in ntyft/ntyxt format
and its rules are $\Lambda$-failure trace safe w.r.t.\ some $\Lambda$.
\end{definition}
If a standard TSS $R$ is in ready trace (resp.\ readiness
or failure trace) format, then there is a smallest predicate
$\Lambda_0$ such that the rules in $R$ are $\Lambda_0$-ready trace
(resp.\ $\Lambda_0$-readiness or $\Lambda_0$-failure trace) safe.
In the context of the ready trace format, for instance,
$\Lambda_0$ can be defined as the smallest predicate $\Lambda$ such
that for all rules of $R$ each $\Lambda$-floating variable is
propagated at $\Lambda$-liquid positions only. Now $R$ is in ready
trace format iff it has no lookahead and in all of its rules each
$\Lambda_0$-floating variable is propagated at most once. Therefore, in
the context of a given standard TSS and a given format, positions can be called
{\em liquid} and variables {\em floating} without mentioning a
specific predicate $\Lambda$; in such a case $\Lambda_0$ may be
assumed.
\begin{theorem}{ready-trace}
If a standard TSS is in ready trace format, then the ready trace preorder
that it induces is a precongruence.
\end{theorem}
\begin{theorem}{readiness}
If a standard TSS is in readiness format, then the readiness preorder
that it induces is a precongruence.
\end{theorem}
\begin{theorem}{failure-trace}
If a standard TSS is in failure trace format, then the failure trace and
failure preorders that it induces are precongruences.
\end{theorem}
See \cite{BFG00} for detailed proofs, or the next section for an
outline, of Theorems \ref{thm-ready-trace}--\ref{thm-failure-trace}.
Section \ref{sec:counterexamples} presents a series of counterexamples
showing that the syntactic restrictions formulated above are essential
for the claimed precongruence results. These counterexamples also help in
motivating the definitions above.
For comparison with the literature we point out that a standard TSS is in
GSOS format \cite{BIM95} iff it is in nxyft format and its transition rules
have no lookahead. A standard TSS is in de Simone's format iff it is positive,
in nxyft format, and its rules are $\Lambda$-failure trace safe with
$\Lambda$ the universal predicate (making all arguments of function
symbols liquid).
\section{Proof sketch}
\label{sec:sketch}
This section outlines the proofs of Theorems
\ref{thm-ready-trace}--\ref{thm-failure-trace}.
The reader is referred to \cite{BFG00} for detailed proofs of the
propositions in this section.
The proofs use transition rules with negative conclusions.
For this reason, the ready trace, readiness and failure trace
formats need to be extended to non-standard TSSs.
\begin{definition}{formats negative}
Let $\Lambda$ be a unary predicate on arguments of function symbols.
An ntytt rule with a negative conclusion is {\em $\Lambda$-ready trace
safe} or {\em $\Lambda$-readiness safe} if it has no lookahead.
The rule is {\em $\Lambda$-failure trace safe} if
\begin{itemise}
\item it is $\Lambda$-readiness safe, and
\item $\Lambda$-floating variables are polled only at $\Lambda$-liquid
positions and only in negative premises.
\end{itemise}
\end{definition}
Now \df{ready-trace} applies to non-standard TSSs as well.
Note that for $\Lambda$-ready trace and $\Lambda$-readiness safety the
requirements are the same as in the standard case, for in a rule with
a negative conclusion no variable is propagated. In the definition of
$\Lambda$-failure trace safety, however, rules with positive and negative
conclusions are treated differently.
Theorems \ref{thm-ready-trace}--\ref{thm-failure-trace} deal with
preorders induced by standard TSSs through the notion of well-founded
provability (of \df{wsp}). The next proposition states that without
loss of generality we may use the classical notion of provability
(of \df{proof}) instead.
\begin{proposition}{plus}
Let $R$ be a standard TSS in ready simulation format.
Then there is a TSS $R^+$ in decent ntyft format such that
$R^+ \vdash \alpha \Leftrightarrow R \vdash_{ws} \alpha$ for all
closed literals $\alpha$, and if $R$ is in ready trace
(resp.\ readiness or failure trace) format then so is $R^+$.
\end{proposition}
In general $R^+$ is not a standard TSS.
It is for this reason that rules with a negative conclusion have been
introduced in \df{TSS}, and that the precongruence formats were extended
to non-standard TSSs in \df{formats negative}.
\begin{definition}{ruloid}
Let $R$ be a standard TSS in ready simulation format. An {\em $R$-ruloid}
is a decent nxytt rule, irredundantly provable from $R^+$.
\end{definition}
\begin{proposition}{preservation}
Let $R$ be a standard TSS in ready trace (resp.\ readiness or failure
trace) format. All $R$-ruloids are $\Lambda$-ready trace (resp.\
$\Lambda$-readiness or $\Lambda$-failure trace) safe for some $\Lambda$.
\end{proposition}
The next proposition says, for $R$ a standard TSS in decent ntyft
format, that for any function symbol $f$ there are a number of $R$-ruloids
with source $f(x_1,\ldots,x_{ar(f)})$
that are ``just right'' for $f$. Here ``just
right'' means that for any (closed) literal \plat{f(t_1,\ldots,t_{ar(f)})
\gonotto a} or $f(t_1,\ldots,t_{ar(f)}) \goto a t'$ that is $ws$-provable
from $R$ there is a (closed) proof using one of those rules as the
last step. Moreover, the same holds not only for function symbols $f$,
but for arbitrary open terms. If for an open term $t$ with
$\var(t)=\{x_1,\ldots,x_n\}$ we would introduce an $n$-ary function
symbol $f_t$ such that $f_t(x_1,\ldots,x_{n})$ is just a shorthand for
$t$, then there exists a collection of $R$-ruloids
that is just right for $f_t$.
\begin{proposition}{ruloid}
Let $R$ be a standard TSS in ready simulation format. Then
$R\vdash_{ws}\sigma(t)\gonotto a$ [respectively
$R\vdash_{ws}\sigma(t)\goto a t'$]
for $t$ a term, [$t'$ a closed term] and $\sigma$ a closed substitution,
iff there are an $R$-ruloid $\frac{H}{t\gonotto a}$
[resp.\ $\frac{H}{t\goto a u}$]
and a closed substitution $\sigma'$ with $R\vdash_{ws}\sigma'(\alpha)$
for $\alpha \in H$, $\sigma'(t)=\sigma(t)$ [and $\sigma'(u)=t'$].
\end{proposition}
The following definition assigns to each term and each observation in
$\IO$ a collection of mappings from variables to $\IO$.
This construct will play a crucial r\^ole in the proof of \cor{congruence}.
Intuitively, $t^{-1}_R(\varphi)$ consists of observational
reformulations of the $R$-ruloids with source $t$ that validate the
observation $\varphi$ for the term $\sigma(t)$, for any closed
substitution $\sigma$, in terms of the observations that can be made
for the terms $\sigma(x)$ for $x \in \var(t)$.
\begin{definition}{inverse}Let $\Sigma$ be a signature, and let $R$ be a
standard TSS over $\Sigma$ in ready simulation format.
Then $\cdot^{-1}_R:\IT(\Sigma)\rightarrow(\IO\rightarrow
\pow(V\rightarrow\IO))$ is defined by:
\begin{itemize}
\item $t^{-1}_R (\top) = \{\psi\}$ with $\psi(x)=\top$ for $x \in V$.
\item $\psi \in t^{-1}_R(\widetilde{a})$ iff there is an $R$-ruloid
$\frac{H}{t\gonotto{a}}$ and
$\psi: V \rightarrow \IO$ is given by $$\begin{array}{ll@{}}
\displaystyle \psi(x) = \bigwedge_{(x \gonotto{b})
\in H}\!\!\!\!\!\widetilde{b} \,\,\,\, \wedge
\bigwedge_{(x\goto{c}y)\in H}\!\!\!\!\! c\top
& \mbox{~for~} x \in \var(t)\\
\psi(x) = \top & \mbox{~for~} x \not\in \var(t).\end{array}$$
\item $\psi \in t^{-1}_R(a\varphi)$ iff there are an $R$-ruloid
$\frac{H}{t\goto{a} u}$ and a $\chi \in u^{-1}_R (\varphi)$ and
$\psi: V \rightarrow \IO$ is given by
$$\displaystyle \psi(x) = \chi(x) \wedge \bigwedge_{(x \gonotto{b})
\in H}\!\!\!\!\!\widetilde{b} \,\,\,\, \wedge
\bigwedge_{(x\goto{c}y)\in H}\!\!\!\!\! c\chi(y)$$
for $x \in \var(t)$ and $\psi(x) = \top$ otherwise.
\item $t^{-1}_R (\bigwedge_{i\in I}\varphi_i)=\{\bigwedge_{i\in I}
\psi_i \,|\, \psi_i \in t^{-1}_R (\varphi_i) \mbox{ for } i\in I\}$.
\end{itemize}
\end{definition}
The next proposition can be proved using \pr{ruloid}.
\begin{proposition}{inverse} Let $\Sigma$ be a signature, and let $R$ be a
standard TSS over $\Sigma$ in ready simulation format. Let $\varphi \in \IO$.
For any term $t \in \IT(\Sigma)$ and closed substitution $\sigma: V
\rightarrow T(\Sigma)$ one has
\[
\begin{array}{cl}
& \sigma(t) \models_R \varphi \\
\Leftrightarrow & \exists \psi \in t^{-1}_R
(\varphi)~\forall x\in \var(t): \sigma(x) \models_R \psi(x).
\end{array}
\]
\end{proposition}
In order to arrive at the desired precongruence results we need to
know that if a standard TSS is in the desired format $N$, and
$\varphi$ is a potential $N$-observation of a closed term $\sigma(t)$,
then the observations of $\sigma(x)$ for $x \in \var(t)$ that determine
whether or not $\varphi$ is an $N$-observation of $\sigma(t)$ are also
$N$-observations. This is established in the following two propositions.
In fact, our formats have been found by investigating what was needed
to make these propositions hold.
The work is divided over two propositions. \pr{formula preservation} deals
with those variables of $t$ that are floating in the $R$-ruloids with
source $t$. As the proof inductively refers to terms that can be
thought of as successors of $t$ after performing a number of actions,
and as these terms may contain variables $y$ representing successors of
the arguments of $t$ after performing several actions, the observations
employed should be the ones from \df{sublanguages}. \pr{formula
preservation frozen} extends this to arbitrary variables. As
non-floating variables represent processes in their initial state only,
that proposition may use the richer language of observations employed in
\df{conjunctive sublanguages}, which is much less cumbersome. This
enables the absence of restrictions on non-floating variables in the
definitions of the formats.
\begin{proposition}{formula preservation}
Let $R$ be a standard TSS in ready simulation format, and let
$\Lambda$ be a unary predicate on arguments of function symbols.
Let $t \in \IT(\Sigma)$, $\varphi \in \IO$, $\psi \in
t_R^{-1}(\varphi)$ and $x \in \var(t)$, such that $x$ occurs only once
in $t$, and at a $\Lambda$-liquid position.
\begin{itemise}
\item
If the transition rules in $R^+$ are $\Lambda$-ready trace safe and
$\varphi \in \IO_{\it RT}$ then $\psi(x) \in \IO_{\it RT}$.
\item
If the transition rules in $R^+$ are $\Lambda$-readiness safe and
$\varphi \in \IO_{\it R}$ then $\psi(x) \in \IO_{\it R}$.
\item
If the transition rules in $R^+$ are $\Lambda$-failure trace safe and
$\varphi \in \IO_{\it FT}$ then $\psi(x) \in \IO_{\it FT}$.
\item
If the transition rules in $R^+$ are $\Lambda$-failure trace safe and
$\varphi \in \IO_{\it F}$ then $\psi(x) \in \IO_{\it F}$.
\end{itemise}
\end{proposition}
\pf
We only prove the last statement here. The other three can be proven in
a similar fashion; see \cite{BFG00}.
Let the transition rules in $R^+$ be $\Lambda$-failure trace safe
and $\varphi \in \IO_{\it F}$. We apply structural induction on $\varphi$.
Take $t \in \IT(\Sigma)$, $\psi \in t_R^{-1}(\varphi)$ and $x \in \var(t)$,
such that $x$ occurs only once in $t$, and at a $\Lambda$-liquid position.
\begin{itemise2}
\item In case $\varphi = \top$ we have $\psi(x)=\top \in \IO_{\it F}$.
\item Let $\varphi = \bigwedge_{i \in I} \widetilde{a_i}$.
Then $\psi(x) = \bigwedge_{i \in I} \psi_i(x)$ where
$\psi_i(x) \in t_R^{-1}(\widetilde{a_i})$ for $i \in I$. For $i \in I$
there is an $R$-ruloid $\frac{H}{t\gonotto{a_i}}$ such that
$$\psi_i(x) = \bigwedge_{(x \gonotto{b})
\in H}\!\!\!\!\!\widetilde{b} \,\,\,\, \wedge
\bigwedge_{(x\goto{c}y)\in H}\!\!\!\!\! c\top.$$
By \pr{preservation}, $H$ has no premises of
the form $x \goto{c} y$. Therefore $\psi_i(x) \cong \bigwedge_{j \in J}
\widetilde{b_j} \in \IO_F$.
\item Let $\varphi = a\varphi'$ with $\varphi' \in \IO_{\it F}$.
Then there are an $R$-ruloid $\frac{H}{t\goto{a} u}$ and $\chi \in
u^{-1}_R (\varphi')$ such that
$$\psi(x) = \chi(x) \wedge \bigwedge_{(x \gonotto{b})
\in H}\!\!\!\!\!\widetilde{b} \,\,\,\, \wedge
\bigwedge_{(x\goto{c}y)\in H}\!\!\!\!\! c\chi(y).$$
By \pr{preservation}, $x$ is propagated at most once in
$\frac{H}{t \goto{a} u}$, and only at a $\Lambda$-liquid position.
Moreover, $x$ is not both propagated
and polled in $\frac{H}{t \goto{a} u}$. We consider three cases.
\begin{itemise3}
\item
Suppose $x \in \var(u)$. Then $x$ is propagated, so it occurs only
once in $u$, and at a $\Lambda$-liquid position. By induction $\chi(x)
\in \IO_F$. Furthermore, $H$ has no premises of the form
\plat{x\gonotto{b}} or $x\goto{c}y$. Hence $\psi(x) \cong \chi(x)\in \IO_F$.
\item
Suppose $x$ is propagated, but does not occur in $u$. Then $\chi(x)\cong\top$.
Furthermore, $H$ contains no premises of the form \plat{x
\gonotto{b}} and exactly one of the form $x \goto{c} y$, where $y$
occurs in $u$. So $\psi(x) \cong c \chi(y)$.
As $y$ is $\Lambda$-floating in $\frac{H}{t\goto{a} u}$ and does
not occur in $t$, \pr{preservation} guarantees that $y$ occurs
only once in $u$, and at a $\Lambda$-liquid position. By induction
$\chi(y) \!\in\! \IO_{\it F}$, so $\psi(x) \!\in\! \IO_F$.
\item
Suppose $x$ is not propagated. Then $x\not\in\var(u)$ so $\chi(x)\cong\top$.
By \pr{preservation}, $x$ is polled at most once
in $\frac{H}{t\goto{a}u}$, and in a positive premise. Hence $H$
contains no literals of the form \plat{x \gonotto b}, and no more than
one literal \plat{x \goto{c} y}. If there is such a literal, $y$ does
not occur in $u$, so $\chi(y)\cong\top$. Hence $\psi(x) \cong \top \in
\IO_{\it F}$ or $\psi(x) \cong c\top \in \IO_{\it F}$.
\hfill $\Box$\vspace{8pt}
\end{itemise3}
\end{itemise2}
Let $N$ range over $\{$ready trace, readiness, failure trace, failure$\}$,
where {\em failure format} means failure trace format.
\begin{proposition}{formula preservation frozen}
Let $R$ be a standard TSS in $N$ format,
$t \in \IT(\Sigma)$, $\varphi \in \IO$, $\psi \in
t_R^{-1}(\varphi)$ and $x \in \var(t)$.
If $\varphi \in \IO_{N}^\wedge$ then $\psi(x) \in \IO_{N}^\wedge$.
\end{proposition}
After \pr{formula preservation}, the proof of \pr{formula
preservation frozen} is rather simple; see \cite{BFG00}.
This is sufficient to prove
Theorems~\mbox{\ref{thm-ready-trace}--\ref{thm-failure-trace}}.
In the light of \df{precongruence} these theorems can be reformulated
as in the following corollary.
\begin{corollary}{congruence}
Let $R$ be a standard TSS in $N$ format, $t \in \IT(\Sigma)$ and $\sigma,
\sigma'$ closed substitutions.
If $\sigma(x) \sqsubseteq_N^R \sigma'(x)$ for $x \in \var(t)$, then
$\sigma(t) \sqsubseteq_N^R \sigma'(t)$.
\end{corollary}
\begin{proof}
By \pr{modal} it suffices to show that
$\fO_N^\wedge(\sigma(x)) \subseteq \fO_N^\wedge(\sigma'(x))$ for all $x
\in \var(t)$ implies $\fO_N(\sigma(t)) \subseteq \fO_N(\sigma'(t))$.
Suppose that, for $x \!\in\! \var(t)$, $\fO_N^\wedge(\sigma(x))
\subseteq \fO_N^\wedge(\sigma'(x))$.
Let $\varphi \in \fO_N(\sigma(t))$, i.e.\
$\varphi \!\in\! \IO_N$ and $\sigma(t) \!\models_R \!\varphi$.
By \pr{inverse} $$\exists \psi \in t^{-1}_R
(\varphi) \forall x\!\in\! \var(t): \sigma(x) \models_R \psi(x).$$
By \pr{formula preservation frozen} $\forall x \!\in\! \var(t): \psi(x)
\in \IO_N^\wedge$. Thus\linebreak $\forall x \in \var(t): \psi(x) \in
\fO_N^\wedge (\sigma(x)) \subseteq \fO_N^\wedge (\sigma'(x))$.
Hence $\forall x \!\in\! \var(t): \sigma'(x) \models_R \psi(x)$.
So by \pr{inverse} $\sigma'(t) \models_R \varphi$.
It follows that $\varphi \in \fO_N(\sigma'(t))$, which had to
be proved.
\end{proof}
\section{Full abstraction}
\label{sec:fullabstraction}
We say that an equivalence on processes is {\em fully abstract}
w.r.t.\ a syntactic format for TSSs and an equivalence $=_{\it obs}$
on processes if it is the coarsest congruence w.r.t.\ all operators
specifiable by a TSS in that format that is finer than $=_{\it obs}$.
The proofs of the following full abstraction results can be found
in \cite{BFG00}.
\begin{theorem}{full abstraction 2}
Ready trace equivalence is fully
abstract for the ready trace format and trace equivalence.
Readiness equivalence is fully abstract for the readiness format
and trace equivalence.
Failure trace equivalence is fully abstract for the failure trace
format and trace equivalence.
Failure equivalence is fully abstract for the failure trace
format and completed trace equivalence.
\end{theorem}
\section{Counterexamples}
\label{sec:counterexamples}
This section presents a string of counterexamples of complete standard
TSSs in ntyft/ntyxt format, to show that the syntactic restrictions of
our precongruence formats are essential. In \cite{GrV92} a series of
counterexamples can be found showing that the syntactic restrictions
of the ntyft/ntyxt format are essential as well.
\subsection{Basic process algebra}
The examples in this section assume basic process algebra \cite{BK84}.
We assume a collection $A$ of constants, called {\em atomic actions},
representing indivisible behaviour, and two special constants:
the {\em deadlock} $\delta$ does not display any behaviour, while the
{\em empty process} $\varepsilon$ \cite{Vra97} terminates successfully.
Basic process algebra moreover includes function symbols $\_+\_$ and
$\_\cdot\_$ of arity two, called {\em alternative composition} and
{\em sequential composition}, respectively. Intuitively, $t_1+t_2$
executes either $t_1$ or $t_2$, while $t_1\cdot t_2$ first executes $t_1$
and upon successful termination executes $t_2$. These intuitions are made precise
by means of the standard TSS for BPA$_{\delta\varepsilon}$ presented
in Table \ref{tab:bpa}, where the label $a$ ranges over the set $A$ of
atomic actions together with a special label $\surd$, representing
successful termination.
Note that this TSS is positive and in ready simulation format.
It is not hard to check that the TSS is in failure trace format (and
so by default in ready trace and readiness format),
if we take at least the first argument
of sequential composition to be liquid. In particular, in the first
rule for sequential composition the floating variable $y$ occurs in a liquid
argument of the target, and in the second rule for sequential composition the
floating variable $x_1$ is polled only once and not propagated.
\begin{table}[ht]
\centering
\caption{TSS for BPA$_{\delta\varepsilon}$}
\label{tab:bpa}
$
\begin{array}{|cc|}
\hline
&\\
a\goto a \varepsilon~(a\not=\surd)&
\varepsilon\goto\surd\delta\\
&\\
\Rule{x_1\goto a y}{x_1+x_2\goto a y}&
\Rule{x_2\goto a y}{x_1+x_2\goto a y}\\
&\\
\Rule{x_1\goto a y}{x_1\cdot x_2\goto a y\cdot x_2}~(a\not=\surd)&
\Rule{x_1\goto \surd y_1~~~~x_2\goto a y_2}{x_1\cdot x_2\goto a y_2}\\
&\\
\hline
\end{array}
$
\end{table}
Terms $t_1\cdot t_2$ are abbreviated to $t_1 t_2$. Brackets are used
for disambiguation only, assuming associativity of $+$ and $\cdot$,
and letting $\cdot$ bind stronger than $+$. In the remainder of this
section we assume that $A=\{a,b,c,d\}$. Moreover, we assume unary
function symbols $f$ and $h$ and a binary function symbol $g$.
\subsection{Lookahead}
The following counterexample shows that the ready trace
format (and its more restrictive analogues) cannot allow lookahead.
\begin{example}{lookahead}
We extend BPA$_{\delta\varepsilon}$ with the following rule,
containing lookahead:
\[
\frac{x\goto b y_1\hspace{5mm}y_1\goto c y_2}{f(x)\goto a \delta}
\]
It is easy to see that $bd\sqsubseteq_{RT}bc+bd$
(so {\em a fortiori} $bd\sqsubseteq_N bc+bd$ for
$N\in\{R,FT,F\}$).
The empty trace is a completed trace of $f(bd)$
but not of $f(bc+bd)$ (as $f(bc+bd)\goto a \delta$).
Hence, $f(bd)\not\sqsubseteq_{CT}f(bc+bd)$
(and {\em a fortiori} $f(bd)\not\sqsubseteq_N f(bc+bd)$
for $N\in\{RT,R,FT,F\}$).
\end{example}
\subsection{Multiple propagations}
The following counterexample shows that the
ready trace format (and its more restrictive analogues) cannot allow
a liquid argument of the source
to be propagated more than once in the left-hand sides of
the positive premises.
\begin{example}{ex2}
Let the arguments of $f$ and $g$ be liquid.
We extend BPA$_{\delta\varepsilon}$ with the rules:
\[
\frac{x\goto a y}{f(x)\goto a f(y)}
\hspace{5mm}
\frac{x\goto b y_1\hspace{5mm}x\goto b y_2}{f(x)\goto b g(y_1,y_2)}
\]
\[
\frac{x_1\goto c y_1\hspace{5mm}x_2\goto d y_2}{g(x_1,x_2)\goto d \delta}
\]
In the second rule, the liquid argument $x$ of
the source is propagated in the left-hand sides of both premises.
It is easy to see that $a(bc+bd)\sqsubseteq_{RT}abc+abd$
(so {\em a fortiori} $a(bc+bd)\sqsubseteq_N abc+abd$ for
$N\in\{R,FT,F\}$).
Note that $abd$ is a trace of $f(a(bc+bd))$
(as $f(a(bc+bd))\goto a f(bc+bd)\goto b
g(c,d)\goto d \delta$), but not of $f(abc+abd)$.
Hence, $f(a(bc+bd))\not\sqsubseteq_{T}f(abc+abd)$ (and
{\em a fortiori} $f(a(bc+bd))\not\sqsubseteq_N f(abc+abd)$
for $N\in\{RT,R,FT,F\}$).
\end{example}
A similar example can be given to show that the ready trace
format cannot allow a liquid argument of the source or a right-hand
side of a positive premise to occur more than once in the target.
Likewise, an example can be given to show that the ready trace
format cannot allow a liquid argument of the source to be propagated
in the left-hand side of a positive premise and at the same time to
occur in the target.
\subsection{Propagation at a non-liquid position}
If in the example above the argument of $f$ were defined
to be frozen, then in the first rule the right-hand side
$y$ of the premise would occur in a non-liquid position in the target.
This shows that the ready trace format cannot allow
right-hand sides of positive premises to occur at
non-liquid positions in the target. Variants of \ex{ex2}
show that the ready trace format cannot allow liquid arguments
of the source to be propagated at non-liquid positions either.
\begin{example}{non-liquid propagation}
Replace the second rule in \ex{ex2} by the two rules
\[
\frac{h(x)\goto b y}{f(x)\goto b y}
\hspace{5mm}
\frac{x\goto b y_1\hspace{5mm}x\goto b y_2}{h(x)\goto b g(y_1,y_2)}
\]
Taking the arguments of $f$ and $g$ liquid, but that of $h$ frozen,
the resulting TSS sins against the ready trace format only in that in
the first rule above the floating variable $x$ is propagated at a
non-liquid position, namely as argument of $h$. Clearly the same mishap
as in \ex{ex2} ensues.\pagebreak[3]
The same argument applies when replacing the second rule in \ex{ex2}
by the two rules
\[
f(x)\goto a h(x)
\hspace{5mm}
\frac{x\goto b y_1\hspace{5mm}x\goto b y_2}{h(x)\goto b g(y_1,y_2)}
\]
\end{example}
The examples above show that if a floating variable $x$ is propagated
in a term $f(x)$, then the argument of $f$ should be classified as
liquid. Furthermore, if $x$ is propagated in a term $f(h(x))$, then
{\em both} the argument of $f$ and that of $h$ should be classified as
liquid. Namely, if only the argument of $f$ would be liquid, a rule with
conclusion \plat{h(x) \goto{b} g(x,x)} could have fatal consequences, and
if only the argument of $h$ would be liquid, a rule with conclusion
\plat{f(x) \goto{b} g(x,x)} could be fatal. (It is left to the reader
to fill in the details.) This justifies the definition of
$\Lambda$-liquid in Section \ref{sec:formats}.
\subsection{Propagation together with polling}
The following counterexample shows that the readiness
format cannot allow that a liquid
argument of the source is both propagated and polled.
(The TSS in this example is in a flawed
congruence format for failure equivalence from \cite{vG93a}.)
\begin{example}{ex3}
Let the arguments of $f$ and $h$ be liquid.
We extend BPA$_{\delta\varepsilon}$ with the rules:
\[
\frac{x\goto a y}{f(x)\goto a f(y)}
\hspace{5mm} \frac{x\goto b y}{f(x)\goto b h(x)}
\]
\[
\frac{x\goto c y}{h(x)\goto c h(y)}
\hspace{8mm} \frac{x\goto d y}{h(x)\goto d \delta}
\]
In the second rule, the liquid argument $x$
of the source is both propagated and polled.
It is not hard to see that $a(b+cd)+ac$ and
$a(b+c)+acd$ are readiness and
failure equivalent (but not ready trace or failure trace equivalent).
Nevertheless, $abcd$ is a trace of $f(a(b+cd)+ac)$
(as \plat{f(a(b+cd)+ac)\goto a f(b+cd)
\goto b h(b+cd)\goto c} \plat{h(d)\goto d \delta}),
but not of $f(a(b+c)+acd)$. Hence,
$f(a(b+cd)+ac)$ and $f(a(b+c)+acd)$
are not even trace equivalent.
It is easy to see that $a(b+c)+ab+ac$ and $ab+ac$ are failure trace and
failure equivalent (but not ready trace or readiness equivalent).
Note that $abc$ is a trace of $f(a(b+c)+ab+ac)$
(as $f(a(b+c)+ab+ac)\goto a f(b+c) \goto b h(b+c)\goto c h(\varepsilon)$),
but not of $f(ab+ac)$. Hence, $f(a(b+c)+ab+ac)$ and $f(ab+ac)$
are not even trace equivalent.
\end{example}
\subsection{Multiple pollings}
The following counterexample shows that the failure trace
format cannot allow that a liquid argument of the source
is polled more than once.
\begin{example}{ex4}
Let the argument of $f$ be liquid.
We extend BPA$_{\delta\varepsilon}$ with the rules:
\[
\frac{x\goto a y}{f(x)\goto a f(y)}
\hspace{5mm}
\frac{x\goto b y_1\hspace{5mm}x\goto c y_2}{f(x)\goto d \delta}
\]
In the second rule, the liquid argument $x$
of the source is polled in the two positive premises.
We recall that $a(b+c)+ab+ac$
and $ab+ac$ are failure trace and failure equivalent.
Nevertheless, $ad$ is a trace of $f(a(b+c)+ab+ac)$
(as \plat{f(a(b+c)+ab+ac)\goto a f(b+c)
\goto d \delta}), but not of $f(ab+ac)$. Hence,
$f(a(b+c)+ab+ac)$ and $f(ab+ac)$
are not even trace equivalent.
\end{example}
\subsection{Polling at a non-liquid position}
The following variant of \ex{ex4} shows that the failure trace
format cannot allow that a liquid argument of the source is polled
at non-liquid positions.
\begin{example}{ex6}
Let the argument of $f$ be liquid and the argument of $h$ be frozen.
We extend BPA$_{\delta\varepsilon}$ with the rules:
\[
\frac{x\goto a y}{f(x)\goto a f(y)}
\hspace{5mm}
\frac{h(x)\goto b y}{f(x)\goto b \delta}
\hspace{5mm}
\frac{x\goto b y_1\hspace{5mm}x\goto c y_2}{h(x)\goto d \delta}
\]
In the second rule, the liquid argument $x$
of the source is polled at a non-liquid position.
Clearly the same mishap as in \ex{ex4} ensues.
\end{example}
\subsection{Polling in a negative premise}
The following counterexample shows that the failure trace
format cannot allow that a liquid argument of the source
is polled in a negative premise.
\begin{example}{ex5}
Let the argument of $f$ be liquid.
We extend BPA$_{\delta\varepsilon}$ with the rules:
\[
\frac{x\goto a y}{f(x)\goto a f(y)}
\hspace{5mm}
\frac{x\gonotto b}{f(x)\goto d \delta}
\hspace{5mm}
\frac{x\gonotto c}{f(x)\goto d \delta}
\]
In the second and third rule, the liquid argument $x$
of the source is polled in the negative premise.
We recall that $a(b+c)+ab+ac$
and $ab+ac$ are failure trace and failure equivalent.
Note that $a$ is a completed trace of $f(a(b+c)+ab+ac)$
(as \plat{f(a(b+c)+ab+ac)\goto a f(b+c) \gonotto{d}}), but not of $f(ab+ac)$.
Hence, $f(a(b+c)+ab+ac)$ and $f(ab+ac)$
are not even completed trace equivalent.
\end{example}
\section{Applications}
\label{sect:applications}
This section contains some applications of our precongruence
formats to TSSs from the literature.
\subsection{Priority}
\label{sect:priority}
{\em Priority} \cite{BBK86} is a unary function symbol that assumes an ordering
on atomic actions. The term $\Theta(t)$ executes the transitions of $t$,
with the restriction that a transition $t\goto a t'$ only gives rise
to a transition $\Theta(t)\goto a \Theta(t')$ if there does not exist
a transition \plat{t\goto b t''} with $a