--- /dev/null
+(*
+Regular Expressions Equivalence
+*)
+
+include "tutorial/chapter9.ma".
+
+(* We say that two pres 〈i_1,b_1〉 and 〈i_2,b_2〉 are {\em cofinal} if and
+only if b_1 = b_2. *)
+
+(* cofinal holds of a pair of pres when their boolean components
+   (the second projections) coincide. *)
+definition cofinal ≝ λS.λp:(pre S)×(pre S).
+ \snd (\fst p) = \snd (\snd p).
+
+(* As a corollary of decidable_sem, we have that two expressions
+e1 and e2 are equivalent iff for any word w the states reachable
+through w are cofinal. *)
+
+(* Two pres are equivalent iff the pres reached along every word w are
+   cofinal. Both directions are reduced to decidable_sem. *)
+theorem equiv_sem: ∀S:DeqSet.∀e1,e2:pre S.
+ \sem{e1} =1 \sem{e2} ↔ ∀w.cofinal ? 〈moves ? w e1,moves ? w e2〉.
+#S #e1 #e2 %
+(* left to right: two booleans that are equivalent as propositions are equal *)
+[#same_sem #w
+ cut (∀b1,b2. iff (b1 = true) (b2 = true) → (b1 = b2))
+ [* * // * #H1 #H2 [@sym_eq @H1 //| @H2 //]]
+ #Hcut @Hcut @iff_trans [|@decidable_sem]
+ @iff_trans [|@same_sem] @iff_sym @decidable_sem
+(* right to left: transfer cofinality back through decidable_sem *)
+|#H #w1 @iff_trans [||@decidable_sem] <H @iff_sym @decidable_sem]
+qed.
+
+(* This does not directly imply decidability: we have no bound over the
+length of w; moreover, so far, we made no assumption over the cardinality
+of S. Instead of requiring S to be finite, we may restrict the analysis
+to characters occurring in the given pres. *)
+
+(* occ e1 e2: the duplicate-free merge of the characters occurring in
+   the two pres. *)
+definition occ ≝ λS.λe1,e2:pre S.
+  unique_append ? (occur S (|\fst e1|)) (occur S (|\fst e2|)).
+
+(* To check cofinality it is enough to consider words over occ e1 e2:
+   on a word containing any other character both moves are rewritten to
+   the pit state (lemma to_pit), where cofinality is immediate. *)
+lemma occ_enough: ∀S.∀e1,e2:pre S.
+(∀w.(sublist S w (occ S e1 e2))→ cofinal ? 〈moves ? w e1,moves ? w e2〉)
+ →∀w.cofinal ? 〈moves ? w e1,moves ? w e2〉.
+#S #e1 #e2 #H #w
+cases (decidable_sublist S w (occ S e1 e2)) [@H] -H #H
+ >to_pit [2: @(not_to_not … H) #H1 #a #memba @sublist_unique_append_l1 @H1 //]
+ >to_pit [2: @(not_to_not … H) #H1 #a #memba @sublist_unique_append_l2 @H1 //]
+ //
+qed.
+
+(* The following is a stronger version of equiv_sem, relative to characters
+occurring the given regular expressions. *)
+
+lemma equiv_sem_occ: ∀S.∀e1,e2:pre S.
+(∀w.(sublist S w (occ S e1 e2))→ cofinal ? 〈moves ? w e1,moves ? w e2〉)
+→ \sem{e1}=1\sem{e2}.
+(* combine the right-to-left direction of equiv_sem with occ_enough *)
+#S #e1 #e2 #H @(proj2 … (equiv_sem …)) @occ_enough #w @H
+qed.
+
+(*
+Bisimulations
+
+
+We say that a list of pairs of pres is a bisimulation if it is closed
+w.r.t. moves, and all its members are cofinal.
+*)
+
+(* sons l p: the pairs reached from the pair p by a single move, one for
+   each character a in l. *)
+definition sons ≝ λS:DeqSet.λl:list S.λp:(pre S)×(pre S).
+ map ?? (λa.〈move S a (\fst (\fst p)),move S a (\fst (\snd p))〉) l.
+
+(* any member of (sons l q) is obtained from q by a move along some
+   character a; proved by induction on l *)
+lemma memb_sons: ∀S,l.∀p,q:(pre S)×(pre S). memb ? p (sons ? l q) = true →
+ ∃a.(move ? a (\fst (\fst q)) = \fst p ∧
+ move ? a (\fst (\snd q)) = \snd p).
+#S #l elim l [#p #q normalize in ⊢ (%→?); #abs @False_ind /2/]
+#a #tl #Hind #p #q #H cases (orb_true_l … H) -H
+ [#H @(ex_intro … a) >(\P H) /2/ |#H @Hind @H]
+qed.
+
+(* l is a bisimulation over the alphabet alpha when every pair in l is
+   cofinal and l is closed under sons *)
+definition is_bisim ≝ λS:DeqSet.λl:list ?.λalpha:list S.
+ ∀p:(pre S)×(pre S). memb ? p l = true → cofinal ? p ∧ (sublist ? (sons ? alpha p) l).
+
+(* Using lemma equiv_sem_occ it is easy to prove the following result: *)
+
+(* If l is a bisimulation w.r.t. occ e1 e2 and contains 〈e1,e2〉, then e1
+   and e2 are equivalent: by equiv_sem_occ it suffices to show that the
+   pair reached along any word over occ stays inside l (induction on w
+   from the right, via list_elim_left), hence is cofinal. *)
+lemma bisim_to_sem: ∀S:DeqSet.∀l:list ?.∀e1,e2: pre S.
+ is_bisim S l (occ S e1 e2) → memb ?〈e1,e2〉 l = true → \sem{e1}=1\sem{e2}.
+#S #l #e1 #e2 #Hbisim #Hmemb @equiv_sem_occ
+#w #Hsub @(proj1 … (Hbisim 〈moves S w e1,moves S w e2〉 ?))
+lapply Hsub @(list_elim_left … w) [//]
+#a #w1 #Hind #Hsub >moves_left >moves_left @(proj2 …(Hbisim …(Hind ?)))
+ [#x #Hx @Hsub @memb_append_l1 //
+ |cut (memb S a (occ S e1 e2) = true) [@Hsub @memb_append_l2 //] #occa
+ @(memb_map … occa)
+ ]
+qed.
+
+(* This is already an interesting result: checking if l is a bisimulation
+is decidable, hence we could generate l with some untrusted piece of code
+and then run a (boolean version of) is_bisim to check that it is actually
+a bisimulation.
+However, in order to prove that equivalence of regular expressions
+is decidable we must prove that we can always effectively build such a list
+(or find a counterexample).
+The idea is that the list we are interested in is just the set of
+all pairs of pres reachable from the initial pair via some
+sequence of moves.
+
+The algorithm for computing reachable nodes in a graph is a very
+traditional one. We split nodes in two disjoint lists: a list of
+visited nodes and a frontier, composed by all nodes connected
+to a node in visited but not visited already. At each step we select a node
+a from the frontier, compute its sons, add a to the set of
+visited nodes and the (not already visited) sons to the frontier.
+
+Instead of first computing reachable nodes and then performing the
+bisimilarity test we can directly integrate it in the algorithm:
+the set of visited nodes is closed by construction w.r.t. reachability,
+so we have just to check cofinality for any node we add to visited.
+
+Here is the extremely simple algorithm: *)
+
+(* bisim explores the product graph: l is the alphabet used to generate
+   sons, n the fuel bounding the number of recursive calls. It answers
+   〈false,visited〉 as soon as a non-cofinal pair is met (or the fuel is
+   exhausted) and 〈true,visited〉 when the frontier becomes empty. *)
+let rec bisim S l n (frontier,visited: list ?) on n ≝
+ match n with
+ [ O ⇒ 〈false,visited〉 (* assert false *)
+ | S m ⇒
+ match frontier with
+ [ nil ⇒ 〈true,visited〉
+ | cons hd tl ⇒
+ if beqb (\snd (\fst hd)) (\snd (\snd hd)) then
+ (* hd is cofinal: mark it visited and push its not-yet-visited sons *)
+ bisim S l m (unique_append ? (filter ? (λx.notb (memb ? x (hd::visited)))
+ (sons S l hd)) tl) (hd::visited)
+ else 〈false,visited〉
+ ]
+ ].
+
+(* The integer n is an upper bound to the number of recursive call,
+equal to the dimension of the graph. It returns a pair composed
+by a boolean and a the set of visited nodes; the boolean is true
+if and only if all visited nodes are cofinal.
+
+The following results explicitly state the behaviour of bisim in the general
+case and in some relevant instances *)
+
+(* bisim is defined by recursion on n, so it only reduces when n is in
+   constructor form; this lemma exposes one step of unfolding. *)
+lemma unfold_bisim: ∀S,l,n.∀frontier,visited: list ?.
+ bisim S l n frontier visited =
+ match n with
+ [ O ⇒ 〈false,visited〉 (* assert false *)
+ | S m ⇒
+ match frontier with
+ [ nil ⇒ 〈true,visited〉
+ | cons hd tl ⇒
+ if beqb (\snd (\fst hd)) (\snd (\snd hd)) then
+ bisim S l m (unique_append ? (filter ? (λx.notb(memb ? x (hd::visited)))
+ (sons S l hd)) tl) (hd::visited)
+ else 〈false,visited〉
+ ]
+ ].
+#S #l #n cases n // qed.
+
+(* fuel exhausted: the result is 〈false,visited〉 *)
+lemma bisim_never: ∀S,l.∀frontier,visited: list ?.
+ bisim S l O frontier visited = 〈false,visited〉.
+#frontier #visited >unfold_bisim //
+qed.
+
+(* empty frontier: success *)
+lemma bisim_end: ∀Sig,l,m.∀visited: list ?.
+ bisim Sig l (S m) [] visited = 〈true,visited〉.
+#n #visisted >unfold_bisim //
+qed.
+
+(* head of the frontier cofinal: one exploration step *)
+lemma bisim_step_true: ∀Sig,l,m.∀p.∀frontier,visited: list ?.
+beqb (\snd (\fst p)) (\snd (\snd p)) = true →
+ bisim Sig l (S m) (p::frontier) visited =
+ bisim Sig l m (unique_append ? (filter ? (λx.notb(memb ? x (p::visited)))
+ (sons Sig l p)) frontier) (p::visited).
+#Sig #l #m #p #frontier #visited #test >unfold_bisim whd in ⊢ (??%?); >test //
+qed.
+
+(* head of the frontier not cofinal: immediate failure *)
+lemma bisim_step_false: ∀Sig,l,m.∀p.∀frontier,visited: list ?.
+beqb (\snd (\fst p)) (\snd (\snd p)) = false →
+ bisim Sig l (S m) (p::frontier) visited = 〈false,visited〉.
+#Sig #l #m #p #frontier #visited #test >unfold_bisim whd in ⊢ (??%?); >test //
+qed.
+
+(* In order to prove termination of bisim we must be able to effectively
+enumerate all possible pres: *)
+
+(* lemma notb_eq_true_l: ∀b. notb b = true → b = false.
+#b cases b normalize //
+qed. *)
+
+(* pitem_enum i: a finite list of pitems built over the structure of the
+   regular expression i, composing the enumerations of the
+   subexpressions (for s y both the marked and unmarked variants are
+   listed); completeness w.r.t. |i| is proved just below. *)
+let rec pitem_enum S (i:re S) on i ≝
+ match i with
+ [ z ⇒ (pz S)::[]
+ | e ⇒ (pe S)::[]
+ | s y ⇒ (ps S y)::(pp S y)::[]
+ | o i1 i2 ⇒ compose ??? (po S) (pitem_enum S i1) (pitem_enum S i2)
+ | c i1 i2 ⇒ compose ??? (pc S) (pitem_enum S i1) (pitem_enum S i2)
+ | k i ⇒ map ?? (pk S) (pitem_enum S i)
+ ].
+
+(* every pitem i occurs in the enumeration generated from |i| *)
+lemma pitem_enum_complete : ∀S.∀i:pitem S.
+ memb (DeqItem S) i (pitem_enum S (|i|)) = true.
+#S #i elim i
+ [1,2://
+ |3,4:#c normalize >(\b (refl … c)) //
+ |5,6:#i1 #i2 #Hind1 #Hind2 @(memb_compose (DeqItem S) (DeqItem S)) //
+ |#i #Hind @(memb_map (DeqItem S)) //
+ ]
+qed.
+
+(* pre_enum i: all pres over the pitems enumerated for i, paired with
+   both booleans *)
+definition pre_enum ≝ λS.λi:re S.
+ compose ??? (λi,b.〈i,b〉) ( pitem_enum S i) (true::false::[]).
+
+lemma pre_enum_complete : ∀S.∀e:pre S.
+ memb ? e (pre_enum S (|\fst e|)) = true.
+#S * #i #b @(memb_compose (DeqItem S) DeqBool ? (λi,b.〈i,b〉))
+// cases b normalize //
+qed.
+
+(* space_enum i1 i2: the full search space, i.e. all pairs of pres over
+   the two enumerations *)
+definition space_enum ≝ λS.λi1,i2: re S.
+ compose ??? (λe1,e2.〈e1,e2〉) ( pre_enum S i1) (pre_enum S i2).
+
+lemma space_enum_complete : ∀S.∀e1,e2: pre S.
+ memb ? 〈e1,e2〉 ( space_enum S (|\fst e1|) (|\fst e2|)) = true.
+#S #e1 #e2 @(memb_compose … (λi,b.〈i,b〉))
+// qed.
+
+(* l has no duplicates and every member of l is reachable from 〈e1,e2〉
+   via some word *)
+definition all_reachable ≝ λS.λe1,e2: pre S.λl: list ?.
+uniqueb ? l = true ∧
+ ∀p. memb ? p l = true →
+ ∃w.(moves S w e1 = \fst p) ∧ (moves S w e2 = \snd p).
+
+(* no element of l1 occurs in l2 *)
+definition disjoint ≝ λS:DeqSet.λl1,l2.
+ ∀p:S. memb S p l1 = true → memb S p l2 = false.
+
+(* We are ready to prove that bisim is correct; we use the invariant
+that at each call of bisim the two lists visited and frontier only contain
+nodes reachable from 〈e_1,e_2〉, hence it is absurd to suppose to meet a pair
+which is not cofinal. *)
+
+(* Correctness: if e1 and e2 are indeed equivalent, bisim cannot fail.
+   The fuel bound suffices because visited grows at each step and stays a
+   duplicate-free subset of the finite space_enum. *)
+lemma bisim_correct: ∀S.∀e1,e2:pre S.\sem{e1}=1\sem{e2} →
+ ∀l,n.∀frontier,visited:list ((pre S)×(pre S)).
+ |space_enum S (|\fst e1|) (|\fst e2|)| < n + |visited|→
+ all_reachable S e1 e2 visited →
+ all_reachable S e1 e2 frontier →
+ disjoint ? frontier visited →
+ \fst (bisim S l n frontier visited) = true.
+#Sig #e1 #e2 #same #l #n elim n
+(* n = O is absurd: visited would exceed the size of the enumeration *)
+ [#frontier #visited #abs * #unique #H @False_ind @(absurd … abs)
+ @le_to_not_lt @sublist_length // * #e11 #e21 #membp
+ cut ((|\fst e11| = |\fst e1|) ∧ (|\fst e21| = |\fst e2|))
+ [|* #H1 #H2 <H1 <H2 @space_enum_complete]
+ cases (H … membp) #w * #we1 #we2 <we1 <we2 % >same_kernel_moves //
+ |#m #HI * [#visited #vinv #finv >bisim_end //]
+ #p #front_tl #visited #Hn * #u_visited #r_visited * #u_frontier #r_frontier
+ #disjoint
+ cut (∃w.(moves ? w e1 = \fst p) ∧ (moves ? w e2 = \snd p))
+ [@(r_frontier … (memb_hd … ))] #rp
+(* p is reachable, hence cofinal by the semantic hypothesis *)
+ cut (beqb (\snd (\fst p)) (\snd (\snd p)) = true)
+ [cases rp #w * #fstp #sndp <fstp <sndp @(\b ?)
+ @(proj1 … (equiv_sem … )) @same] #ptest
+ >(bisim_step_true … ptest) @HI -HI
+ [<plus_n_Sm //
+ |% [whd in ⊢ (??%?); >(disjoint … (memb_hd …)) whd in ⊢ (??%?); //
+ |#p1 #H (cases (orb_true_l … H)) [#eqp >(\P eqp) // |@r_visited]
+ ]
+ |whd % [@unique_append_unique @(andb_true_r … u_frontier)]
+ @unique_append_elim #q #H
+ [cases (memb_sons … (memb_filter_memb … H)) -H
+ #a * #m1 #m2 cases rp #w1 * #mw1 #mw2 @(ex_intro … (w1@(a::[])))
+ >moves_left >moves_left >mw1 >mw2 >m1 >m2 % //
+ |@r_frontier @memb_cons //
+ ]
+ |@unique_append_elim #q #H
+ [@injective_notb @(memb_filter_true … H)
+ |cut ((q==p) = false)
+ [|#Hpq whd in ⊢ (??%?); >Hpq @disjoint @memb_cons //]
+ cases (andb_true … u_frontier) #notp #_ @(\bf ?)
+ @(not_to_not … not_eq_true_false) #eqqp <notp <eqqp >H //
+ ]
+ ]
+ ]
+qed.
+
+(* For completeness, we use the invariant that all the nodes in visited are cofinal,
+and the sons of visited are either in visited or in the frontier; since
+at the end frontier is empty, visited is hence a bisimulation. *)
+
+(* every pair in l passes the boolean cofinality test *)
+definition all_true ≝ λS.λl.∀p:(pre S) × (pre S). memb ? p l = true →
+ (beqb (\snd (\fst p)) (\snd (\snd p)) = true).
+
+(* the sons (over l) of every member of l1 belong to l2 *)
+definition sub_sons ≝ λS,l,l1,l2.∀x:(pre S) × (pre S).
+memb ? x l1 = true → sublist ? (sons ? l x) l2.
+
+(* Completeness: if bisim answers 〈true,visited_res〉 then visited_res is
+   a bisimulation containing frontier@visited. The invariants are that
+   all visited pairs are cofinal and that their sons lie in
+   frontier@visited. *)
+lemma bisim_complete:
+ ∀S,l,n.∀frontier,visited,visited_res:list ?.
+ all_true S visited →
+ sub_sons S l visited (frontier@visited) →
+ bisim S l n frontier visited = 〈true,visited_res〉 →
+ is_bisim S visited_res l ∧ sublist ? (frontier@visited) visited_res.
+#S #l #n elim n
+ [#fron #vis #vis_res #_ #_ >bisim_never #H destruct
+ |#m #Hind *
+ [(* case empty frontier *)
+ -Hind #vis #vis_res #allv #H normalize in ⊢ (%→?);
+ #H1 destruct % #p
+ [#membp % [@(\P ?) @allv //| @H //]|#H1 @H1]
+ |#hd cases (true_or_false (beqb (\snd (\fst hd)) (\snd (\snd hd))))
+ [|(* case head of the frontier is non ok (absurd) *)
+ #H #tl #vis #vis_res #allv >(bisim_step_false … H) #_ #H1 destruct]
+ (* frontier = hd:: tl and hd is ok *)
+ #H #tl #visited #visited_res #allv >(bisim_step_true … H)
+ (* new_visited = hd::visited are all ok *)
+ cut (all_true S (hd::visited))
+ [#p #H1 cases (orb_true_l … H1) [#eqp >(\P eqp) @H |@allv]]
+ (* we now exploit the induction hypothesis *)
+ #allh #subH #bisim cases (Hind … allh … bisim) -bisim -Hind
+ [#H1 #H2 % // #p #membp @H2 -H2 cases (memb_append … membp) -membp #membp
+ [cases (orb_true_l … membp) -membp #membp
+ [@memb_append_l2 >(\P membp) @memb_hd
+ |@memb_append_l1 @sublist_unique_append_l2 //
+ ]
+ |@memb_append_l2 @memb_cons //
+ ]
+ |(* the only thing left to prove is the sub_sons invariant *)
+ #x #membx cases (orb_true_l … membx)
+ [(* case x = hd *)
+ #eqhdx <(\P eqhdx) #xa #membxa
+ (* xa is a son of x; we must distinguish the case xa
+ was already visited from the case xa is new *)
+ cases (true_or_false … (memb ? xa (x::visited)))
+ [(* xa visited - trivial *) #membxa @memb_append_l2 //
+ |(* xa new *) #membxa @memb_append_l1 @sublist_unique_append_l1 @memb_filter_l
+ [>membxa //|//]
+ ]
+ |(* case x in visited *)
+ #H1 #xa #membxa cases (memb_append … (subH x … H1 … membxa))
+ [#H2 (cases (orb_true_l … H2))
+ [#H3 @memb_append_l2 <(\P H3) @memb_hd
+ |#H3 @memb_append_l1 @sublist_unique_append_l2 @H3
+ ]
+ |#H2 @memb_append_l2 @memb_cons @H2
+ ]
+ ]
+ ]
+ ]
+qed.
+
+(* We can now give the definition of the equivalence algorithm, and
+prove that two expressions are equivalent if and only if they define
+the same language. *)
+
+(* The equivalence checker: compute the initial pres, bound the fuel by
+   the size of the search space plus one, and run bisim from the initial
+   pair with an empty visited list. *)
+definition equiv ≝ λSig.λre1,re2:re Sig.
+ let e1 ≝ •(blank ? re1) in
+ let e2 ≝ •(blank ? re2) in
+ let n ≝ S (length ? (space_enum Sig (|\fst e1|) (|\fst e2|))) in
+ let sig ≝ (occ Sig e1 e2) in
+ (bisim ? sig n (〈e1,e2〉::[]) []).
+
+(* Soundness and completeness of equiv. NOTE(review): the theorem name
+   "euqiv_sem" is misspelled ("equiv_sem" is presumably intended), but it
+   is kept as is since external files may reference it. *)
+theorem euqiv_sem : ∀Sig.∀e1,e2:re Sig.
+ \fst (equiv ? e1 e2) = true ↔ \sem{e1} =1 \sem{e2}.
+#Sig #re1 #re2 %
+ [#H @eqP_trans [|@eqP_sym @re_embedding] @eqP_trans [||@re_embedding]
+ cut (equiv ? re1 re2 = 〈true,\snd (equiv ? re1 re2)〉)
+ [<H //] #Hcut
+ cases (bisim_complete … Hcut)
+ [2,3: #p whd in ⊢ ((??%?)→?); #abs @False_ind /2/]
+ #Hbisim #Hsub @(bisim_to_sem … Hbisim)
+ @Hsub @memb_hd
+ |#H @(bisim_correct ? (•(blank ? re1)) (•(blank ? re2)))
+ [@eqP_trans [|@re_embedding] @eqP_trans [|@H] @eqP_sym @re_embedding
+ |//
+ |% // #p whd in ⊢ ((??%?)→?); #abs @False_ind /2/
+ |% // #p #H >(memb_single … H) @(ex_intro … ϵ) /2/
+ |#p #_ normalize //
+ ]
+ ]
+qed.
+
+definition eqbnat ≝ λn,m:nat. eqb n m.
+
+(* eqbnat reflects propositional equality on nat *)
+lemma eqbnat_true : ∀n,m. eqbnat n m = true ↔ n = m.
+#n #m % [@eqb_true_to_eq | @eq_to_eqb_true]
+qed.
+
+(* natural numbers as a DeqSet, to be used as alphabet *)
+definition DeqNat ≝ mk_DeqSet nat eqb eqbnat_true.
+
+(* three sample characters *)
+definition a ≝ s DeqNat O.
+definition b ≝ s DeqNat (S O).
+definition c ≝ s DeqNat (S (S O)).
+
+(* a few sample regular expressions *)
+definition exp1 ≝ ((a·b)^*·a).
+definition exp2 ≝ a·(b·a)^*.
+definition exp4 ≝ (b·a)^*.
+
+definition exp6 ≝ a·(a ·a ·b^* + b^* ).
+definition exp7 ≝ a · a^* · b^*.
+
+definition exp8 ≝ a·a·a·a·a·a·a·a·(a^* ).
+definition exp9 ≝ (a·a·a + a·a·a·a·a)^*.
+
+(* the equivalence check computes by mere normalization *)
+example ex1 : \fst (equiv ? (exp8+exp9) exp9) = true.
+normalize // qed.
\ No newline at end of file
--- /dev/null
+(*
+Polymorphism and Higher Order
+
+*)
+include "tutorial/chapter2.ma".
+include "basics/bool.ma".
+
+(* Matita supports polymorphic data types. The most typical case are polymorphic
+lists, parametric in the type of their elements: *)
+
+(* polymorphic lists: nil is the empty list, cons adds a head element of
+   type A in front of a list *)
+inductive list (A:Type[0]) : Type[0] ≝
+ | nil: list A
+ | cons: A -> list A -> list A.
+
+(* The type notation list A is the type of all lists with elements of type A:
+it is defined by two constructors: a polymorphic empty list (nil A) and a cons
+operation, adding a new head element of type A to a previous list. For instance,
+(list nat) and and (list bool) are lists of natural numbers and booleans,
+respectively. But we can also form more complex data types, like
+(list (list (nat → nat))), that is a list whose elements are lists of functions
+from natural numbers to natural numbers.
+
+Typical elements in (list bool) are for instance,
+  nil bool - the empty list of type bool
+  cons bool true (nil bool) - the list containing true
+  cons bool false (cons bool true (nil bool)) - the list containing false and true
+and so on.
+
+Note that both constructors nil and cons are expecting in input the type parameter:
+in this case, bool.
+*)
+
+(*
+Defining your own notation
+
+We now add a bit of notation, in order to make the syntax more readable. In
+particular, we would like to write [] instead of (nil A) and a::l instead of
+(cons A a l), leaving the system the burden to infer A, whenever possible.
+*)
+
+notation "hvbox(hd break :: tl)"
+ right associative with precedence 47
+ for @{'cons $hd $tl}.
+
+(* [a; b; c] folds the 'cons notation over the listed elements, ending
+   with 'nil *)
+notation "[ list0 x sep ; ]"
+ non associative with precedence 90
+ for ${fold right @'nil rec acc @{'cons $x $acc}}.
+
+notation "hvbox(l1 break @ l2)"
+ right associative with precedence 47
+ for @{'append $l1 $l2 }.
+
+interpretation "nil" 'nil = (nil ?).
+interpretation "cons" 'cons hd tl = (cons ? hd tl).
+
+(*
+Basic operations on lists
+
+Let us define a few basic functions over lists. Our first example is the
+append function, concatenating two lists l1 and l2. The natural way is to proceed
+by recursion on l1: if it is empty the result is simply l2, while if l1 = hd::tl
+then we recursively append tl and l2 , and then add hd as first element. Note that
+the append function itself is polymorphic, and the first argument it takes in input
+is the type A of the elements of two lists l1 and l2.
+Moreover, since the append function takes in input several parameters, we must also
+specify in its definition on which one of them we are recurring: in this case l1.
+If not otherwise specified, recursion is supposed to act on the first argument of the
+function.*)
+
+(* append l1 l2: concatenation, by recursion on the first list *)
+let rec append A (l1: list A) l2 on l1 ≝
+ match l1 with
+ [ nil ⇒ l2
+ | cons hd tl ⇒ hd :: append A tl l2 ].
+
+interpretation "append" 'append l1 l2 = (append ? l1 l2).
+
+(* As usual, the function is executable. For instance, (append A nil l) reduces to
+l, as shown by the following example: *)
+
+example nil_append: ∀A.∀l:list A. [] @ l = l.
+#A #l normalize // qed.
+
+(* Proving that l @ [] = l is just a bit more complex. The situation is exactly
+the same as for the addition operation of the previous chapter: since append is
+defined by recursion over the first argument, the computation of l @ [] is stuck,
+and we must proceed by induction on l *)
+
+lemma append_nil: ∀A.∀l:list A.l @ [] = l.
+#A #l (elim l) normalize // qed.
+
+(* similarly, we can define the two functions head and tail. Since we can only define
+total functions, we should decide what to do in case the input list is empty.
+For tl, it is natural to return the empty list; for hd, we take in input a default
+element d of type A to return in this case. *)
+
+(* head l d: the first element of l, or the default d when l is empty *)
+definition head ≝ λA.λl: list A.λd:A.
+ match l with [ nil ⇒ d | cons a _ ⇒ a].
+
+(* tail l: l without its first element; the empty list when l is empty *)
+definition tail ≝ λA.λl: list A.
+ match l with [ nil ⇒ [] | cons hd tl ⇒ tl].
+
+example ex_head: ∀A.∀a,d,l. head A (a::l) d = a.
+#A #a #d #l normalize // qed.
+
+example ex_tail: tail ? (cons ? true []) = [].
+normalize // qed.
+
+theorem associative_append:
+∀A.∀l1,l2,l3: list A. (l1 @ l2) @ l3 = l1 @ (l2 @ l3).
+#A #l1 #l2 #l3 (elim l1) normalize // qed.
+
+(* Problems with the notation *)
+lemma a_append: ∀A.∀a.∀l:list A. (a::[]) @ l = a::l.
+// qed.
+
+theorem append_cons:
+∀A.∀a:A.∀l,l1: list A.l@(a::l1)= (l @ (cons ? a [])) @ l1.
+// qed.
+
+(* Other typical functions over lists are those computing the length
+of a list, and the function returning the nth element *)
+
+(* length l: the number of elements of l *)
+let rec length (A:Type[0]) (l:list A) on l ≝
+match l with
+ [ nil ⇒ O
+ | cons a tl ⇒ S (length A tl)].
+
+(* nth n A l d: the n-th element of l (0-based); through head, the
+   default d is returned when the list is too short *)
+let rec nth n (A:Type[0]) (l:list A) (d:A) ≝
+ match n with
+ [O ⇒ head A l d
+ |S m ⇒ nth m A (tail A l) d].
+
+example ex_length: length ? (cons ? O []) = S O.
+normalize // qed.
+
+example ex_nth: nth (S O) ? (cons ? (S O) (cons ? O [])) O = O.
+normalize // qed.
+
+(* Proving that the length of l1@l2 is the sum of the lengths of l1
+and l2 just requires a trivial induction on the first list. *)
+
+ lemma length_add: ∀A.∀l1,l2:list A.
+ length ? (l1@l2) = add (length ? l1) (length ? l2).
+#A #l1 elim l1 normalize // qed.
+
+(*
+Comparing Constructors
+
+Let us come to a more interesting question. How can we prove that the empty
+list is different from any list with at least one element, that is from any list
+of the kind (a::l)? We start defining a simple predicate stating if a list is
+empty or not. The predicate is computed by inspection over the list *)
+
+(* is_nil l: a proposition discriminating empty from non-empty lists,
+   computed by inspection of l *)
+definition is_nil: ∀A:Type[0].list A → Prop ≝
+λA.λl.match l with [ nil ⇒ l = [] | cons hd tl ⇒ (l ≠ [])].
+
+(* Next we need a simple result about negation: if you wish to prove ¬P you are
+authorized to add P to your hypothesis: *)
+
+lemma neg_aux : ∀P:Prop. (P → ¬P) → ¬P.
+#P #PtonegP % /3/ qed.
+
+theorem diff_cons_nil:
+∀A:Type[0].∀l:list A.∀a:A. a::l ≠ [].
+#A #l #a @neg_aux #Heq
+(* we start assuming the new hypothesis Heq of type a::l = [] using neg_aux.
+Next we use the change tactic to pass from the current goal a::l≠ [] to the
+expression is_nil a::l, convertible with it. *)
+(change with (is_nil ? (a::l)))
+(* Now, we rewrite with Heq, obtaining (is_nil A []), that reduces to the trivial
+goal [] = [] *)
+>Heq // qed.
+
+(* As an application of the previous result let us prove that l1@l2 is empty if
+and only if both l1 and l2 are empty.
+The idea is to proceed by cases on l1: if l1=[] the statement is trivial; on the
+other side, if l1 = a::tl, then the hypothesis (a::tl)@l2 = [] is absurd, hence we
+can prove anything from it.
+When we know we can prove both A and ¬A, a sensible way to proceed is to apply
+False_ind: ∀P.False → P to the current goal, that breaks down to prove False, and
+then absurd: ∀A:Prop. A → ¬A → False to reduce to the contradictory cases.
+Usually, you may invoke automation to take care to solve the absurd case. *)
+
+lemma nil_to_nil: ∀A.∀l1,l2:list A.
+ l1@l2 = [] → l1 = [] ∧ l2 = [].
+#A #l1 cases l1 normalize /2/ #a #tl #l2 #H @False_ind /2/ qed.
+
+(*
+Higher Order Functionals
+
+Let us come to some important, higher order, polymorphic functionals
+acting over lists. A typical example is the map function, taking a function
+f:A → B, a list l = [a1; a2; ... ; an] and returning the list
+[f a1; f a2; ... ; f an]. *)
+
+(* map f [a1; …; an] = [f a1; …; f an] *)
+let rec map (A,B:Type[0]) (f: A → B) (l:list A) on l: list B ≝
+ match l with [ nil ⇒ [] | cons x tl ⇒ f x :: (map A B f tl)].
+
+(* Another major example is the fold function, that taken a list
+l = [a1; a2; ... ;an], a base value b:B, and a function f: A → B → B returns
+(f a1 (f a2 (... (f an b)...))). *)
+
+let rec foldr (A,B:Type[0]) (f:A → B → B) (b:B) (l:list A) on l :B ≝
+ match l with [ nil ⇒ b | cons a l ⇒ f a (foldr A B f b l)].
+
+(* As an example of application of foldr, let us use it to define a filter
+function that given a list l: list A and a boolean test p:A → bool returns the
+sublist of elements satisfying the test. In this case, the result type B of
+foldr should be (list A), the base value is [], and f: A → list A →list A is
+the function that taken x and l returns x::l, if x satisfies the test, and l
+otherwise. We use an if_then_else function included from bool.ma to this purpose. *)
+
+(* filter p l: the sublist of the elements of l satisfying the test p *)
+definition filter ≝
+ λT.λp:T → bool.
+ foldr T (list T) (λx,l0. if p x then x::l0 else l0) [].
+
+(* Here are a couple of simple lemmas on the behaviour of the filter function.
+It is often convenient to state such lemmas, in order to be able to use rewriting
+as an alternative to reduction in proofs: reduction is a bit difficult to control.
+*)
+
+(* a head passing the test is kept *)
+lemma filter_true : ∀A,l,a,p. p a = true →
+ filter A p (a::l) = a :: filter A p l.
+#A #l #a #p #pa (elim l) normalize >pa // qed.
+
+(* a head failing the test is dropped *)
+lemma filter_false : ∀A,l,a,p. p a = false →
+ filter A p (a::l) = filter A p l.
+#A #l #a #p #pa (elim l) normalize >pa normalize // qed.
+
+(* As another example, let us redefine the map function using foldr. The
+result type B is (list B), the base value b is [], and the fold function
+of type A → list B → list B is the function mapping a and l to (f a)::l.
+*)
+
+definition map_again ≝ λA,B,f,l. foldr A (list B) (λa,l.f a::l) [] l.
+
+(*
+Extensional equality
+
+Can we prove that map_again is "the same" as map? We should first of all
+clarify in which sense we expect the two functions to be equal. Equality in
+Matita has an intentional meaning: it is the smallest predicate induced by
+convertibility, i.e. syntactical equality up to normalization. From an
+intentional point of view, map and map_again are not functions, but programs,
+and they are clearly different. What we would like to say is that the two
+programs behave in the same way: this is a different, extensional equality
+that can be defined in the following way. *)
+
+(* two functions are extensionally equal when they agree on every input *)
+definition ExtEq ≝ λA,B:Type[0].λf,g:A→B.∀a:A.f a = g a.
+
+(* That map and map_again are extensionally equal in the previous sense
+can be proved by a trivial structural induction on the list *)
+
+lemma eq_maps: ∀A,B,f. ExtEq ?? (map A B f) (map_again A B f).
+#A #B #f #n (elim n) normalize // qed.
+
+(* Let us make another remark about extensional equality. It is clear that,
+if f is extensionally equal to g, then (map A B f) is extensionally equal to
+(map A B g). Let us prove it. *)
+
+theorem eq_map : ∀A,B,f,g. ExtEq A B f g → ExtEq ?? (map A B f) (map A B g).
+#A #B #f #g #eqfg
+
+(* the relevant point is that we cannot proceed by rewriting f with g via
+eqfg, here. Rewriting only works with Matita intensional equality, while here
+we are dealing with a different predicate, defined by the user. The right way
+to proceed is to unfold the definition of ExtEq, and work by induction on l,
+as usual when we want to prove extensional equality between functions over
+inductive types; again the rest of the proof is trivial. *)
+
+#l (elim l) normalize // qed.
+
+(*
+Big Operators
+
+Building a library of basic functions, it is important to achieve a
+good degree of abstraction and generality, in order to be able to reuse
+suitable instances of the same function in different context. This has not
+only the obvious benefit of factorizing code, but especially to avoid
+repeating proofs of generic properties over and over again.
+A really convenient tool is the following combination of fold and filter,
+that essentially allow you to iterate on every subset of a given enumerated
+(finite) type, represented as a list. *)
+
+(* fold op b p f l: combine with op the f-images of the elements of l
+   passing the test p, starting from the base value b *)
+ let rec fold (A,B:Type[0]) (op:B→B→B) (b:B) (p:A→bool) (f:A→B) (l:list A) on l:B ≝
+ match l with
+ [ nil ⇒ b
+ | cons a l ⇒ if p a then op (f a) (fold A B op b p f l) else
+ (fold A B op b p f l)].
+
+(* It is also important to spend a little time to introduce some fancy notation
+for these iterators. *)
+
+ notation "\fold [ op , nil ]_{ ident i ∈ l | p} f"
+ with precedence 80
+for @{'fold $op $nil (λ${ident i}. $p) (λ${ident i}. $f) $l}.
+
+notation "\fold [ op , nil ]_{ident i ∈ l } f"
+ with precedence 80
+for @{'fold $op $nil (λ${ident i}.true) (λ${ident i}. $f) $l}.
+
+interpretation "\fold" 'fold op nil p f l = (fold ? ? op nil p f l).
+
+(* unfolding lemma: head accepted by the filter *)
+theorem fold_true:
+∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f:A→B. p a = true →
+ \fold[op,nil]_{i ∈ a::l| p i} (f i) =
+ op (f a) \fold[op,nil]_{i ∈ l| p i} (f i).
+#A #B #a #l #p #op #nil #f #pa normalize >pa // qed.
+
+(* unfolding lemma: head rejected by the filter *)
+theorem fold_false:
+∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f.
+p a = false → \fold[op,nil]_{i ∈ a::l| p i} (f i) =
+ \fold[op,nil]_{i ∈ l| p i} (f i).
+#A #B #a #l #p #op #nil #f #pa normalize >pa // qed.
+
+(* folding with the filter p equals filtering with p first and then
+   folding without a filter *)
+theorem fold_filter:
+∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f:A →B.
+ \fold[op,nil]_{i ∈ l| p i} (f i) =
+ \fold[op,nil]_{i ∈ (filter A p l)} (f i).
+#A #B #a #l #p #op #nil #f elim l //
+#a #tl #Hind cases(true_or_false (p a)) #pa
+ [ >filter_true // > fold_true // >fold_true //
+ | >filter_false // >fold_false // ]
+qed.
+
+(* Aop A nil: an associative operation on A having nil as a two-sided
+   neutral element *)
+record Aop (A:Type[0]) (nil:A) : Type[0] ≝
+{op :2> A → A → A;
+ nill:∀a. op nil a = a;
+ nilr:∀a. op a nil = a;
+ assoc: ∀a,b,c.op a (op b c) = op (op a b) c
+}.
+
+(* for an associative operation with neutral element, the fold over I@J
+   splits as the op of the two separate folds *)
+theorem fold_sum: ∀A,B. ∀I,J:list A.∀nil.∀op:Aop B nil.∀f:A → B.
+ op (\fold[op,nil]_{i ∈ I} (f i)) (\fold[op,nil]_{i ∈ J} (f i)) =
+ \fold[op,nil]_{i ∈ (I@J)} (f i).
+#A #B #I #J #nil #op #f (elim I) normalize
+ [>nill//|#a #tl #Hind <assoc //]
+qed.
\ No newline at end of file
--- /dev/null
+(*
+Naif Set Theory
+
+*)
+include "basics/types.ma".
+include "basics/bool.ma".
+(*
+In this Chapter we shall develop a naif theory of sets represented as
+characteristic predicates over some universe A, that is as objects of type
+A→Prop.
+For instance the empty set is defined by the always false function: *)
+
+definition empty_set ≝ λA:Type[0].λa:A.False.
+notation "\emptyv" non associative with precedence 90 for @{'empty_set}.
+interpretation "empty set" 'empty_set = (empty_set ?).
+
+(* Similarly, a singleton set containing an element a, is defined
+by the characteristic function asserting equality with a *)
+
+definition singleton ≝ λA.λx,a:A.x=a.
+(* notation "{x}" non associative with precedence 90 for @{'sing_lang $x}. *)
+interpretation "singleton" 'singl x = (singleton ? x).
+
+(* The membership relation between an element of type A and a set S:A →Prop is
+simply the predicate resulting from the application of S to a.
+The operations of union, intersection, complement and substraction
+are easily defined in terms of the propositional connectives of disjunction,
+conjunction and negation *)
+
+definition union : ∀A:Type[0].∀P,Q.A → Prop ≝ λA,P,Q,a.P a ∨ Q a.
+interpretation "union" 'union a b = (union ? a b).
+
+definition intersection : ∀A:Type[0].∀P,Q.A→Prop ≝ λA,P,Q,a.P a ∧ Q a.
+interpretation "intersection" 'intersects a b = (intersection ? a b).
+
+definition complement ≝ λU:Type[0].λA:U → Prop.λw.¬ A w.
+interpretation "complement" 'not a = (complement ? a).
+
+definition substraction := λU:Type[0].λA,B:U → Prop.λw.A w ∧ ¬ B w.
+interpretation "substraction" 'minus a b = (substraction ? a b).
+
+(* Finally, we use implication to define the inclusion relation between
+sets *)
+
+definition subset: ∀A:Type[0].∀P,Q:A→Prop.Prop ≝ λA,P,Q.∀a:A.(P a → Q a).
+interpretation "subset" 'subseteq a b = (subset ? a b).
+
+(*
+Set Equality
+
+Two sets are equals if and only if they have the same elements, that is,
+if the two characteristic functions are extensionally equivalent: *)
+
+(* extensional equality of sets: same members *)
+definition eqP ≝ λA:Type[0].λP,Q:A → Prop.∀a:A.P a ↔ Q a.
+notation "A =1 B" non associative with precedence 45 for @{'eqP $A $B}.
+interpretation "extensional equality" 'eqP a b = (eqP ? a b).
+
+(*
+This notion of equality is different from the intensional equality of
+functions; the fact it defines an equivalence relation must be explicitly
+proved: *)
+
+lemma eqP_sym: ∀U.∀A,B:U →Prop.
+ A =1 B → B =1 A.
+#U #A #B #eqAB #a @iff_sym @eqAB qed.
+
+lemma eqP_trans: ∀U.∀A,B,C:U →Prop.
+ A =1 B → B =1 C → A =1 C.
+#U #A #B #C #eqAB #eqBC #a @iff_trans // qed.
+
+(* For the same reason, we must also prove that all the operations behave well
+with respect to eqP: *)
+
+(* =1 is a congruence w.r.t. union, intersection and substraction, on
+   both arguments *)
+lemma eqP_union_r: ∀U.∀A,B,C:U →Prop.
+ A =1 C → A ∪ B =1 C ∪ B.
+#U #A #B #C #eqAB #a @iff_or_r @eqAB qed.
+
+lemma eqP_union_l: ∀U.∀A,B,C:U →Prop.
+ B =1 C → A ∪ B =1 A ∪ C.
+#U #A #B #C #eqBC #a @iff_or_l @eqBC qed.
+
+lemma eqP_intersect_r: ∀U.∀A,B,C:U →Prop.
+ A =1 C → A ∩ B =1 C ∩ B.
+#U #A #B #C #eqAB #a @iff_and_r @eqAB qed.
+
+lemma eqP_intersect_l: ∀U.∀A,B,C:U →Prop.
+ B =1 C → A ∩ B =1 A ∩ C.
+#U #A #B #C #eqBC #a @iff_and_l @eqBC qed.
+
+lemma eqP_substract_r: ∀U.∀A,B,C:U →Prop.
+ A =1 C → A - B =1 C - B.
+#U #A #B #C #eqAB #a @iff_and_r @eqAB qed.
+
+lemma eqP_substract_l: ∀U.∀A,B,C:U →Prop.
+ B =1 C → A - B =1 A - C.
+#U #A #B #C #eqBC #a @iff_and_l /2/ qed.
+
+(*
+Simple properties of sets
+
+We can now prove several properties of the previous set-theoretic operations.
+In particular, union is commutative and associative, and the empty set is an
+identity element: *)
+
+lemma union_empty_r: ∀U.∀A:U→Prop.
+ A ∪ ∅ =1 A.
+#U #A #w % [* // normalize #abs @False_ind /2/ | /2/]
+qed.
+
+lemma union_comm : ∀U.∀A,B:U →Prop.
+ A ∪ B =1 B ∪ A.
+#U #A #B #a % * /2/ qed.
+
+lemma union_assoc: ∀U.∀A,B,C:U → Prop.
+ A ∪ B ∪ C =1 A ∪ (B ∪ C).
+#S #A #B #C #w % [* [* /3/ | /3/ ] | * [/3/ | * /3/]
+qed.
+
+(* In the same way we prove commutativity and associativity for set
+interesection *)
+
+lemma cap_comm : ∀U.∀A,B:U →Prop.
+ A ∩ B =1 B ∩ A.
+#U #A #B #a % * /2/ qed.
+
+lemma cap_assoc: ∀U.∀A,B,C:U→Prop.
+ A ∩ (B ∩ C) =1 (A ∩ B) ∩ C.
+#U #A #B #C #w % [ * #Aw * /3/ | * * /3/ ]
+qed.
+
+(* We can also easily prove idempotency for union and intersection *)
+
+lemma union_idemp: ∀U.∀A:U →Prop.
+ A ∪ A =1 A.
+#U #A #a % [* // | /2/] qed.
+
+lemma cap_idemp: ∀U.∀A:U →Prop.
+ A ∩ A =1 A.
+#U #A #a % [* // | /2/] qed.
+
+(* We conclude our examples with a couple of distributivity theorems, and a
+characterization of subtraction in terms of intersection and complementation. *)
+
+lemma distribute_intersect : ∀U.∀A,B,C:U→Prop.
+ (A ∪ B) ∩ C =1 (A ∩ C) ∪ (B ∩ C).
+#U #A #B #C #w % [* * /3/ | * * /3/]
+qed.
+
+lemma distribute_substract : ∀U.∀A,B,C:U→Prop.
+ (A ∪ B) - C =1 (A - C) ∪ (B - C).
+#U #A #B #C #w % [* * /3/ | * * /3/]
+qed.
+
+lemma substract_def:∀U.∀A,B:U→Prop. A-B =1 A ∩ ¬B.
+#U #A #B #w normalize /2/
+qed.
+
+(*
+Bool vs. Prop
+
+In several situation it is important to assume to have a decidable equality
+between elements of a set U, namely a boolean function eqb: U→U→bool such that
+for any pair of elements a and b in U, (eqb x y) is true if and only if x=y.
+A set equipped with such an equality is called a DeqSet: *)
+
+record DeqSet : Type[1] ≝ { carr :> Type[0];
+ eqb: carr → carr → bool;
+ eqb_true: ∀x,y. (eqb x y = true) ↔ (x = y)
+}.
+
+(* We use the notation == to denote the decidable equality, to distinguish it
+from the propositional equality. In particular, a term of the form a==b is a
+boolean, while a=b is a proposition. *)
+
+notation "a == b" non associative with precedence 45 for @{ 'eqb $a $b }.
+interpretation "eqb" 'eqb a b = (eqb ? a b).
+
+(*
+Small Scale Reflection
+
+It is convenient to have a simple way to reflect a proof of the fact
+that (eqb a b) is true into a proof of the proposition (a = b); to this aim,
+we introduce two operators "\P" and "\b". *)
+
+notation "\P H" non associative with precedence 90
+ for @{(proj1 … (eqb_true ???) $H)}.
+
+notation "\b H" non associative with precedence 90
+ for @{(proj2 … (eqb_true ???) $H)}.
+
+(* If H:eqb a b = true, then \P H: a = b, and conversely if h:a = b, then
+\b h: eqb a b = true. Let us see an example of their use: the following
+statement asserts that we can reflect a proof that eqb a b is false into
+a proof of the proposition a ≠ b. *)
+
+lemma eqb_false: ∀S:DeqSet.∀a,b:S.
+ (eqb ? a b) = false ↔ a ≠ b.
+
+(* We start the proof introducing the hypothesis, and then split the "if" and
+"only if" cases *)
+
+#S #a #b % #H
+
+(* The latter is easily reduced to prove the goal true=false under the assumption
+H1: a = b *)
+ [@(not_to_not … not_eq_true_false) #H1
+
+(* since by assumption H false is equal to (a==b), by rewriting we obtain the goal
+true=(a==b) that is just the boolean version of H1 *)
+
+ <H @sym_eq @(\b H1)
+
+(* In the "if" case, we proceed by cases over the boolean equality (a==b); if
+(a==b) is false, the goal is trivial; the other case is absurd, since if (a==b) is
+true, then by reflection a=b, while by hypothesis a≠b *)
+
+ |cases (true_or_false (eqb ? a b)) // #H1 @False_ind @(absurd … (\P H1) H)
+ ]
+qed.
+
+(* We also introduce two operators "\Pf" and "\bf" to reflect a proof
+of (a==b)=false into a proof of a≠b, and vice-versa *)
+
+notation "\Pf H" non associative with precedence 90
+ for @{(proj1 … (eqb_false ???) $H)}.
+
+notation "\bf H" non associative with precedence 90
+ for @{(proj2 … (eqb_false ???) $H)}.
+
+(* The following statement proves that propositional equality in a
+DeqSet is decidable in the traditional sense, namely either a=b or a≠b *)
+
+ lemma dec_eq: ∀S:DeqSet.∀a,b:S. a = b ∨ a ≠ b.
+#S #a #b cases (true_or_false (eqb ? a b)) #H
+ [%1 @(\P H) | %2 @(\Pf H)]
+qed.
+
+(*
+Unification Hints
+
+A simple example of a set with a decidable equality is bool. We first define
+the boolean equality beqb, that is just the equality test on booleans (the
+negation of xor), then prove that beqb b1 b2 is true if and only if b1=b2,
+and finally build the type DeqBool by instantiating the DeqSet record with
+the previous information *)
+
+definition beqb ≝ λb1,b2.
+ match b1 with [ true ⇒ b2 | false ⇒ notb b2].
+
+notation < "a == b" non associative with precedence 45 for @{beqb $a $b }.
+
+lemma beqb_true: ∀b1,b2. iff (beqb b1 b2 = true) (b1 = b2).
+#b1 #b2 cases b1 cases b2 normalize /2/
+qed.
+
+definition DeqBool ≝ mk_DeqSet bool beqb beqb_true.
+
+(* At this point, we would expect to be able to prove things like the
+following: for any boolean b, if b==false is true then b=false.
+Unfortunately, this would not work, unless we declare b of type
+DeqBool (change the type in the following statement and see what
+happens). *)
+
+example exhint: ∀b:DeqBool. (b==false) = true → b=false.
+#b #H @(\P H)
+qed.
+
+(* The point is that == expects in input a pair of objects whose type must be the
+carrier of a DeqSet; bool is indeed the carrier of DeqBool, but the type inference
+system has no knowledge of it (it is an information that has been supplied by the
+user, and stored somewhere in the library). More explicitly, the type inference
+system would face a unification problem consisting in unifying bool
+against the carrier of something (a metavariable), and it has no way to synthesize
+the answer. To solve this kind of situation, matita provides a mechanism to hint
+the system the expected solution. A unification hint is a kind of rule, whose right
+hand side is the unification problem, containing some metavariables X1, ..., Xn,
+and whose left hand side is the solution suggested to the system, in the form of
+equations Xi=Mi. The hint is accepted by the system if and only if the solution is
+correct, that is, if it is a unifier for the given problem.
+To make an example, in the previous case, the unification problem is bool = carr X,
+and the hint is to take X = mk_DeqSet bool beqb beqb_true. The hint is correct, since
+bool is convertible with (carr (mk_DeqSet bool beqb beqb_true)). *)
+
+unification hint 0 ≔ ;
+ X ≟ mk_DeqSet bool beqb beqb_true
+(* ---------------------------------------- *) ⊢
+ bool ≡ carr X.
+
+unification hint 0 ≔ b1,b2:bool;
+ X ≟ mk_DeqSet bool beqb beqb_true
+(* ---------------------------------------- *) ⊢
+ beqb b1 b2 ≡ eqb X b1 b2.
+
+(* After having provided the previous hints, we may rewrite example exhint
+declaring b of type bool. *)
+
+example exhint1: ∀b:bool. (b == false) = true → b = false.
+#b #H @(\P H)
+qed.
+
+(* The cartesian product of two DeqSets is still a DeqSet. To prove
+this, we must as usual define the boolean equality function, and prove
+it correctly reflects propositional equality. *)
+
+definition eq_pairs ≝
+ λA,B:DeqSet.λp1,p2:A×B.(\fst p1 == \fst p2) ∧ (\snd p1 == \snd p2).
+
+lemma eq_pairs_true: ∀A,B:DeqSet.∀p1,p2:A×B.
+ eq_pairs A B p1 p2 = true ↔ p1 = p2.
+#A #B * #a1 #b1 * #a2 #b2 %
+ [#H cases (andb_true …H) normalize #eqa #eqb >(\P eqa) >(\P eqb) //
+ |#H destruct normalize >(\b (refl … a2)) >(\b (refl … b2)) //
+ ]
+qed.
+
+definition DeqProd ≝ λA,B:DeqSet.
+ mk_DeqSet (A×B) (eq_pairs A B) (eq_pairs_true A B).
+
+(* Having an unification problem of the kind T1×T2 = carr X, what kind
+of hint can we give to the system? We expect T1 to be the carrier of a
+DeqSet C1, T2 to be the carrier of a DeqSet C2, and X to be DeqProd C1 C2.
+This is expressed by the following hint: *)
+
+unification hint 0 ≔ C1,C2;
+ T1 ≟ carr C1,
+ T2 ≟ carr C2,
+ X ≟ DeqProd C1 C2
+(* ---------------------------------------- *) ⊢
+ T1×T2 ≡ carr X.
+
+unification hint 0 ≔ T1,T2,p1,p2;
+ X ≟ DeqProd T1 T2
+(* ---------------------------------------- *) ⊢
+ eq_pairs T1 T2 p1 p2 ≡ eqb X p1 p2.
+
+(* With the hints for DeqProd in place, equality on pairs of booleans can be
+reflected directly; the proof was previously missing its closing qed. *)
+example hint2: ∀b1,b2.
+ 〈b1,true〉==〈false,b2〉=true → 〈b1,true〉=〈false,b2〉.
+#b1 #b2 #H @(\P H)
+qed.
\ No newline at end of file
--- /dev/null
+(*
+Effective searching
+
+The fact of being able to decide, via a computable boolean function, the
+equality between elements of a given set is an essential prerequisite for
+effectively searching an element of that set inside a data structure. In this
+section we shall define several boolean functions acting on lists of elements in
+a DeqSet, and prove some of their properties.*)
+
+include "basics/list.ma".
+include "tutorial/chapter4.ma".
+
+(* The first function we define is an effective version of the membership relation,
+between an element x and a list l. Its definition is a straightforward recursion on
+l.*)
+
+(* (memb S x l) is true iff x occurs in the list l, comparing elements
+with the decidable equality == of the DeqSet S. *)
+let rec memb (S:DeqSet) (x:S) (l: list S) on l ≝
+ match l with
+ [ nil ⇒ false
+ | cons a tl ⇒ (x == a) ∨ memb S x tl
+ ].
+
+notation < "\memb x l" non associative with precedence 90 for @{'memb $x $l}.
+interpretation "boolean membership" 'memb a l = (memb ? a l).
+
+(* We can now prove several interesting properties for memb:
+- memb_hd: x is a member of x::l
+- memb_cons: if x is a member of l then x is a member of a::l
+- memb_single: if x is a member of [a] then x=a
+- memb_append: if x is a member of l1@l2 then either x is a member of l1
+  or x is a member of l2
+- memb_append_l1: if x is a member of l1 then x is a member of l1@l2
+- memb_append_l2: if x is a member of l2 then x is a member of l1@l2
+- memb_exists: if x is a member of l, then l can be decomposed as l1@(x::l2)
+- not_memb_to_not_eq: if x is not a member of l and y is, then x≠y
+- memb_map: if a is a member of l, then (f a) is a member of (map f l)
+- memb_compose: if a is a member of l1 and b is a member of l2 then
+  (op a b) is a member of (compose op l1 l2)
+*)
+
+lemma memb_hd: ∀S,a,l. memb S a (a::l) = true.
+#S #a #l normalize >(proj2 … (eqb_true S …) (refl S a)) //
+qed.
+
+lemma memb_cons: ∀S,a,b,l.
+ memb S a l = true → memb S a (b::l) = true.
+#S #a #b #l normalize cases (a==b) normalize //
+qed.
+
+lemma memb_single: ∀S,a,x. memb S a (x::[]) = true → a = x.
+#S #a #x normalize cases (true_or_false … (a==x)) #H
+ [#_ >(\P H) // |>H normalize #abs @False_ind /2/]
+qed.
+
+lemma memb_append: ∀S,a,l1,l2.
+memb S a (l1@l2) = true → memb S a l1= true ∨ memb S a l2 = true.
+#S #a #l1 elim l1 normalize [#l2 #H %2 //]
+#b #tl #Hind #l2 cases (a==b) normalize /2/
+qed.
+
+lemma memb_append_l1: ∀S,a,l1,l2.
+ memb S a l1= true → memb S a (l1@l2) = true.
+#S #a #l1 elim l1 normalize
+ [normalize #le #abs @False_ind /2/
+ |#b #tl #Hind #l2 cases (a==b) normalize /2/
+ ]
+qed.
+
+lemma memb_append_l2: ∀S,a,l1,l2.
+ memb S a l2= true → memb S a (l1@l2) = true.
+#S #a #l1 elim l1 normalize //
+#b #tl #Hind #l2 cases (a==b) normalize /2/
+qed.
+
+lemma memb_exists: ∀S,a,l.memb S a l = true → ∃l1,l2.l=l1@(a::l2).
+#S #a #l elim l [normalize #abs @False_ind /2/]
+#b #tl #Hind #H cases (orb_true_l … H)
+ [#eqba @(ex_intro … (nil S)) @(ex_intro … tl) >(\P eqba) //
+ |#mem_tl cases (Hind mem_tl) #l1 * #l2 #eqtl
+ @(ex_intro … (b::l1)) @(ex_intro … l2) >eqtl //
+ ]
+qed.
+
+lemma not_memb_to_not_eq: ∀S,a,b,l.
+ memb S a l = false → memb S b l = true → a==b = false.
+#S #a #b #l cases (true_or_false (a==b)) //
+#eqab >(\P eqab) #H >H #abs @False_ind /2/
+qed.
+
+lemma memb_map: ∀S1,S2,f,a,l. memb S1 a l= true →
+ memb S2 (f a) (map … f l) = true.
+#S1 #S2 #f #a #l elim l normalize [//]
+#x #tl #memba cases (true_or_false (a==x))
+ [#eqx >eqx >(\P eqx) >(\b (refl … (f x))) normalize //
+ |#eqx >eqx cases (f a==f x) normalize /2/
+ ]
+qed.
+
+lemma memb_compose: ∀S1,S2,S3,op,a1,a2,l1,l2.
+ memb S1 a1 l1 = true → memb S2 a2 l2 = true →
+ memb S3 (op a1 a2) (compose S1 S2 S3 op l1 l2) = true.
+#S1 #S2 #S3 #op #a1 #a2 #l1 elim l1 [normalize //]
+#x #tl #Hind #l2 #memba1 #memba2 cases (orb_true_l … memba1)
+ [#eqa1 >(\P eqa1) @memb_append_l1 @memb_map //
+ |#membtl @memb_append_l2 @Hind //
+ ]
+qed.
+
+(*
+Unicity
+
+If we are interested in representing finite sets as lists, it is convenient
+to avoid duplications of elements. The following uniqueb checks this property.
+*)
+
+(* (uniqueb S l) is true iff no element occurs twice in l. *)
+let rec uniqueb (S:DeqSet) l on l : bool ≝
+ match l with
+ [ nil ⇒ true
+ | cons a tl ⇒ notb (memb S a tl) ∧ uniqueb S tl
+ ].
+
+(* unique_append l1 l2 adds l1 in front of l2, but preserving unicity *)
+
+(* Prepends to l2 each element of l1, skipping those already present in the
+result being built; if l2 has no duplicates, neither has the result. *)
+let rec unique_append (S:DeqSet) (l1,l2: list S) on l1 ≝
+ match l1 with
+ [ nil ⇒ l2
+ | cons a tl ⇒
+ let r ≝ unique_append S tl l2 in
+ if memb S a r then r else a::r
+ ].
+
+lemma memb_unique_append: ∀S,a,l1,l2.
+ memb S a (unique_append S l1 l2) = true → memb S a l1= true ∨ memb S a l2 = true.
+#S #a #l1 elim l1 normalize [#l2 #H %2 //]
+#b #tl #Hind #l2 cases (true_or_false … (a==b)) #Hab >Hab normalize /2/
+cases (memb S b (unique_append S tl l2)) normalize
+ [@Hind | >Hab normalize @Hind]
+qed.
+
+lemma unique_append_elim: ∀S:DeqSet.∀P: S → Prop.∀l1,l2.
+ (∀x. memb S x l1 = true → P x) → (∀x. memb S x l2 = true → P x) →
+ ∀x. memb S x (unique_append S l1 l2) = true → P x.
+#S #P #l1 #l2 #Hl1 #Hl2 #x #membx cases (memb_unique_append … membx) /2/
+qed.
+
+lemma unique_append_unique: ∀S,l1,l2. uniqueb S l2 = true →
+ uniqueb S (unique_append S l1 l2) = true.
+#S #l1 elim l1 normalize // #a #tl #Hind #l2 #uniquel2
+cases (true_or_false … (memb S a (unique_append S tl l2)))
+#H >H normalize [@Hind //] >H normalize @Hind //
+qed.
+
+(*
+Sublists
+
+*)
+definition sublist ≝
+ λS,l1,l2.∀a. memb S a l1 = true → memb S a l2 = true.
+
+lemma sublist_length: ∀S,l1,l2.
+ uniqueb S l1 = true → sublist S l1 l2 → |l1| ≤ |l2|.
+#S #l1 elim l1 //
+#a #tl #Hind #l2 #unique #sub
+cut (∃l3,l4.l2=l3@(a::l4)) [@memb_exists @sub //]
+* #l3 * #l4 #eql2 >eql2 >length_append normalize
+applyS le_S_S <length_append @Hind [@(andb_true_r … unique)]
+>eql2 in sub; #sub #x #membx
+cases (memb_append … (sub x (orb_true_r2 … membx)))
+ [#membxl3 @memb_append_l1 //
+ |#membxal4 cases (orb_true_l … membxal4)
+ [#eqxa @False_ind lapply (andb_true_l … unique)
+ <(\P eqxa) >membx normalize /2/ |#membxl4 @memb_append_l2 //
+ ]
+ ]
+qed.
+
+lemma sublist_unique_append_l1:
+ ∀S,l1,l2. sublist S l1 (unique_append S l1 l2).
+#S #l1 elim l1 normalize [#l2 #S #abs @False_ind /2/]
+#x #tl #Hind #l2 #a
+normalize cases (true_or_false … (a==x)) #eqax >eqax
+[<(\P eqax) cases (true_or_false (memb S a (unique_append S tl l2)))
+ [#H >H normalize // | #H >H normalize >(\b (refl … a)) //]
+|cases (memb S x (unique_append S tl l2)) normalize
+ [/2/ |>eqax normalize /2/]
+]
+qed.
+
+lemma sublist_unique_append_l2:
+ ∀S,l1,l2. sublist S l2 (unique_append S l1 l2).
+#S #l1 elim l1 [normalize //] #x #tl #Hind normalize
+#l2 #a cases (memb S x (unique_append S tl l2)) normalize
+[@Hind | cases (a==x) normalize // @Hind]
+qed.
+
+lemma decidable_sublist:∀S,l1,l2.
+ (sublist S l1 l2) ∨ ¬(sublist S l1 l2).
+#S #l1 #l2 elim l1
+ [%1 #a normalize in ⊢ (%→?); #abs @False_ind /2/
+ |#a #tl * #subtl
+ [cases (true_or_false (memb S a l2)) #memba
+ [%1 whd #x #membx cases (orb_true_l … membx)
+ [#eqax >(\P eqax) // |@subtl]
+ |%2 @(not_to_not … (eqnot_to_noteq … true memba)) #H1 @H1 @memb_hd
+ ]
+ |%2 @(not_to_not … subtl) #H1 #x #H2 @H1 @memb_cons //
+ ]
+ ]
+qed.
+
+(*
+Filtering
+*)
+
+lemma memb_filter_true: ∀S,f,a,l.
+ memb S a (filter S f l) = true → f a = true.
+#S #f #a #l elim l [normalize #H @False_ind /2/]
+#b #tl #Hind cases (true_or_false (f b)) #H
+normalize >H normalize [2:@Hind]
+cases (true_or_false (a==b)) #eqab
+ [#_ >(\P eqab) // | >eqab normalize @Hind]
+qed.
+
+lemma memb_filter_memb: ∀S,f,a,l.
+ memb S a (filter S f l) = true → memb S a l = true.
+#S #f #a #l elim l [normalize //]
+#b #tl #Hind normalize (cases (f b)) normalize
+cases (a==b) normalize // @Hind
+qed.
+
+lemma memb_filter: ∀S,f,l,x. memb S x (filter ? f l) = true →
+memb S x l = true ∧ (f x = true).
+/3/ qed.
+
+lemma memb_filter_l: ∀S,f,x,l. (f x = true) → memb S x l = true →
+memb S x (filter ? f l) = true.
+#S #f #x #l #fx elim l normalize //
+#b #tl #Hind cases (true_or_false (x==b)) #eqxb
+ [<(\P eqxb) >(\b (refl … x)) >fx normalize >(\b (refl … x)) normalize //
+ |>eqxb cases (f b) normalize [>eqxb normalize @Hind| @Hind]
+ ]
+qed.
+
+(*
+Exists
+
+*)
+
+(* (exists A p l) is true iff the boolean predicate p holds for at least
+one element of the list l. *)
+let rec exists (A:Type[0]) (p:A → bool) (l:list A) on l : bool ≝
+match l with
+[ nil ⇒ false
+| cons h t ⇒ orb (p h) (exists A p t)
+].
\ No newline at end of file
--- /dev/null
+(*
+Formal Languages
+
+In this chapter we shall apply our notion of DeqSet, and the list operations
+defined in Chapter 4 to formal languages. A formal language is an arbitrary set
+of words over a given alphabet, that we shall represent as a predicate over words.
+*)
+include "tutorial/chapter5.ma".
+
+(* A word (or string) over an alphabet S is just a list of elements of S.*)
+definition word ≝ λS:DeqSet.list S.
+
+(* For any alphabet there is only one word of length 0, the empty word, which is
+denoted by ϵ .*)
+
+notation "ϵ" non associative with precedence 90 for @{ 'epsilon }.
+interpretation "epsilon" 'epsilon = (nil ?).
+
+(* The operation that consists in appending two words to form a new word, whose
+length is the sum of the lengths of the original words is called concatenation.
+String concatenation is just the append operation over lists, hence there is no
+point to define it. Similarly, many of its properties, such as the fact that
+concatenating a word with the empty word gives the original word, follow by
+general results over lists.
+*)
+
+(*
+Operations over languages
+
+Languages inherit all the basic operations for sets, namely union, intersection,
+complementation, substraction, and so on. In addition, we may define some new
+operations induced by string concatenation, and in particular the concatenation
+A · B of two languages A and B, the so called Kleene's star A* of A and the
+derivative of a language A w.r.t. a given character a. *)
+
+definition cat : ∀S,l1,l2,w.Prop ≝
+ λS.λl1,l2.λw:word S.∃w1,w2.w1 @ w2 = w ∧ l1 w1 ∧ l2 w2.
+
+notation "a · b" non associative with precedence 60 for @{ 'middot $a $b}.
+interpretation "cat lang" 'middot a b = (cat ? a b).
+
+
+(* Given a language l, the Kleene's star of l, denoted by l*, is the set of
+finite-length strings that can be generated by concatenating arbitrary strings of
+l. In other words, w belongs to l* if and only if there exists a list of strings
+w1,w2,...wk all belonging to l, such that w = w1w2...wk.
+We need to define the latter operations. The following flatten function takes in
+input a list of words and concatenates them together. *)
+
+let rec flatten (S : DeqSet) (l : list (word S)) on l : word S ≝
+match l with [ nil ⇒ [ ] | cons w tl ⇒ w @ flatten ? tl ].
+
+(* Given a list of words l and a language r, (conjunct l r) is true if and only if
+all words in l are in r, that is for every w in l, r w holds. *)
+
+let rec conjunct (S : DeqSet) (l : list (word S)) (r : word S → Prop) on l: Prop ≝
+match l with [ nil ⇒ True | cons w tl ⇒ r w ∧ conjunct ? tl r ].
+
+(* We are ready to give the formal definition of the Kleene's star of l:
+a word w belongs to l* if and only if there exists a list of strings
+lw such that (conjunct lw l) and w = flatten lw. *)
+
+definition star ≝ λS.λl.λw:word S.∃lw.flatten ? lw = w ∧ conjunct ? lw l.
+notation "a ^ *" non associative with precedence 90 for @{ 'star $a}.
+interpretation "star lang" 'star l = (star ? l).
+
+(* The derivative of a language A with respect to a character a is the set of
+all strings w such that aw is in A. *)
+
+definition deriv ≝ λS.λA:word S → Prop.λa,w. A (a::w).
+
+(*
+Language equalities
+
+Equality between languages is just the usual extensional equality between
+sets. The operation of concatenation behaves well with respect to this equality. *)
+
+lemma cat_ext_l: ∀S.∀A,B,C:word S →Prop.
+ A =1 C → A · B =1 C · B.
+#S #A #B #C #H #w % * #w1 * #w2 * * #eqw #inw1 #inw2
+cases (H w1) /6/
+qed.
+
+lemma cat_ext_r: ∀S.∀A,B,C:word S →Prop.
+ B =1 C → A · B =1 A · C.
+#S #A #B #C #H #w % * #w1 * #w2 * * #eqw #inw1 #inw2
+cases (H w2) /6/
+qed.
+
+(* Concatenating a language with the empty language results in the
+empty language. *)
+lemma cat_empty_l: ∀S.∀A:word S→Prop. ∅ · A =1 ∅.
+#S #A #w % [|*] * #w1 * #w2 * * #_ *
+qed.
+
+(* Concatenating a language l with the singleton language containing the
+empty string, results in the language l; that is {ϵ} is a left and right
+unit with respect to concatenation. *)
+
+lemma epsilon_cat_r: ∀S.∀A:word S →Prop.
+ A · {ϵ} =1 A.
+#S #A #w %
+ [* #w1 * #w2 * * #eqw #inw1 normalize #eqw2 <eqw //
+ |#inA @(ex_intro … w) @(ex_intro … [ ]) /3/
+ ]
+qed.
+
+lemma epsilon_cat_l: ∀S.∀A:word S →Prop.
+ {ϵ} · A =1 A.
+#S #A #w %
+ [* #w1 * #w2 * * #eqw normalize #eqw2 <eqw <eqw2 //
+ |#inA @(ex_intro … ϵ) @(ex_intro … w) /3/
+ ]
+ qed.
+
+(* Concatenation is distributive w.r.t. union. *)
+
+lemma distr_cat_r: ∀S.∀A,B,C:word S →Prop.
+ (A ∪ B) · C =1 A · C ∪ B · C.
+#S #A #B #C #w %
+ [* #w1 * #w2 * * #eqw * /6/ |* * #w1 * #w2 * * /6/]
+qed.
+
+lemma distr_cat_r_eps: ∀S.∀A,C:word S →Prop.
+ (A ∪ {ϵ}) · C =1 A · C ∪ C.
+ #S #A #C @eqP_trans [|@distr_cat_r |@eqP_union_l @epsilon_cat_l]
+qed.
+
+(* The following is a major property of derivatives *)
+
+lemma deriv_middot: ∀S,A,B,a. ¬ A ϵ → deriv S (A·B) a =1 (deriv S A a) · B.
+#S #A #B #a #noteps #w normalize %
+ [* #w1 cases w1
+ [* #w2 * * #_ #Aeps @False_ind /2/
+ |#b #w2 * #w3 * * whd in ⊢ ((??%?)→?); #H destruct
+ #H #H1 @(ex_intro … w2) @(ex_intro … w3) % // % //
+ ]
+ |* #w1 * #w2 * * #H #H1 #H2 @(ex_intro … (a::w1))
+ @(ex_intro … w2) % // % normalize //
+ ]
+qed.
+
+(*
+Main Properties of Kleene's star
+
+We conclude this section with some important properties of Kleene's
+star that will be used in the following chapters. *)
+
+lemma espilon_in_star: ∀S.∀A:word S → Prop.
+ A^* ϵ.
+#S #A @(ex_intro … [ ]) normalize /2/
+qed.
+
+lemma cat_to_star:∀S.∀A:word S → Prop.
+ ∀w1,w2. A w1 → A^* w2 → A^* (w1@w2).
+#S #A #w1 #w2 #Aw * #l * #H #H1 @(ex_intro … (w1::l))
+% normalize /2/
+qed.
+
+lemma fix_star: ∀S.∀A:word S → Prop.
+ A^* =1 A · A^* ∪ {ϵ}.
+#S #A #w %
+ [* #l generalize in match w; -w cases l [normalize #w * /2/]
+ #w1 #tl #w * whd in ⊢ ((??%?)→?); #eqw whd in ⊢ (%→?); *
+ #w1A #cw1 %1 @(ex_intro … w1) @(ex_intro … (flatten S tl))
+ % /2/ whd @(ex_intro … tl) /2/
+ |* [2: whd in ⊢ (%→?); #eqw <eqw //]
+ * #w1 * #w2 * * #eqw <eqw @cat_to_star
+ ]
+qed.
+
+lemma star_fix_eps : ∀S.∀A:word S → Prop.
+ A^* =1 (A - {ϵ}) · A^* ∪ {ϵ}.
+#S #A #w %
+ [* #l elim l
+ [* whd in ⊢ ((??%?)→?); #eqw #_ %2 <eqw //
+ |* [#tl #Hind * #H * #_ #H2 @Hind % [@H | //]
+ |#a #w1 #tl #Hind * whd in ⊢ ((??%?)→?); #H1 * #H2 #H3 %1
+ @(ex_intro … (a::w1)) @(ex_intro … (flatten S tl)) %
+ [% [@H1 | normalize % /2/] |whd @(ex_intro … tl) /2/]
+ ]
+ ]
+ |* [* #w1 * #w2 * * #eqw * #H1 #_ <eqw @cat_to_star //
+ | whd in ⊢ (%→?); #H <H //
+ ]
+ ]
+qed.
+
+lemma star_epsilon: ∀S:DeqSet.∀A:word S → Prop.
+ A^* ∪ {ϵ} =1 A^*.
+#S #A #w % /2/ * //
+qed.
+
\ No newline at end of file
--- /dev/null
+(*
+Regular Expressions
+
+We shall apply all the previous machinery to the study of regular languages
+and the constructions of the associated finite automata. *)
+
+include "tutorial/chapter6.ma".
+
+(* The type re of regular expressions over an alphabet $S$ is the smallest
+collection of objects generated by the following constructors: *)
+
+inductive re (S: DeqSet) : Type[0] ≝
+ z: re S (* empty: ∅ *)
+ | e: re S (* epsilon: ϵ *)
+ | s: S → re S (* symbol: a *)
+ | c: re S → re S → re S (* concatenation: e1 · e2 *)
+ | o: re S → re S → re S (* plus: e1 + e2 *)
+ | k: re S → re S. (* kleene's star: e* *)
+
+interpretation "re epsilon" 'epsilon = (e ?).
+interpretation "re or" 'plus a b = (o ? a b).
+interpretation "re cat" 'middot a b = (c ? a b).
+interpretation "re star" 'star a = (k ? a).
+
+notation < "a" non associative with precedence 90 for @{ 'ps $a}.
+notation > "` term 90 a" non associative with precedence 90 for @{ 'ps $a}.
+interpretation "atom" 'ps a = (s ? a).
+
+notation "`∅" non associative with precedence 90 for @{ 'empty }.
+interpretation "empty" 'empty = (z ?).
+
+(* The language sem{e} associated with the regular expression e is inductively
+defined by the following function: *)
+
+let rec in_l (S : DeqSet) (r : re S) on r : word S → Prop ≝
+match r with
+[ z ⇒ ∅
+| e ⇒ {ϵ}
+| s x ⇒ { (x::[]) }
+| c r1 r2 ⇒ (in_l ? r1) · (in_l ? r2)
+| o r1 r2 ⇒ (in_l ? r1) ∪ (in_l ? r2)
+| k r1 ⇒ (in_l ? r1) ^*].
+
+notation "\sem{term 19 E}" non associative with precedence 75 for @{'in_l $E}.
+interpretation "in_l" 'in_l E = (in_l ? E).
+interpretation "in_l mem" 'mem w l = (in_l ? l w).
+
+lemma rsem_star : ∀S.∀r: re S. \sem{r^*} = \sem{r}^*.
+// qed.
+
+
+(*
+Pointed Regular expressions
+
+We now introduce pointed regular expressions, that are the main tool we shall
+use for the construction of the automaton.
+A pointed regular expression is just a regular expression internally labelled
+with some additional points. Intuitively, points mark the positions inside the
+regular expression which have been reached after reading some prefix of
+the input string, or better the positions where the processing of the remaining
+string has to be started. Each pointed expression for $e$ represents a state of
+the {\em deterministic} automaton associated with $e$; since we obviously have
+only a finite number of possible labellings, the number of states of the automaton
+is finite.
+
+Pointed regular expressions provide the tool for an algebraic revisitation of
+McNaughton and Yamada's algorithm for position automata, making the proof of its
+correctness, that is far from trivial, particularly clear and simple. In particular,
+pointed expressions offer an appealing alternative to Brzozowski's derivatives,
+avoiding their weakest point, namely the fact of being forced to quotient derivatives
+w.r.t. a suitable notion of equivalence in order to get a finite number of states
+(that is not essential for recognizing strings, but is crucial for comparing regular
+expressions).
+
+Our main data structure is the notion of pointed item, whose purpose
+is to encode a set of positions inside a regular expression.
+The idea of formalizing pointers inside a data type by means of a labelled version
+of the data type itself is probably one of the first, major lessons learned in the
+formalization of the metatheory of programming languages. For our purposes, it is
+enough to mark positions preceding individual characters, so we shall have two kinds
+of characters •a (pp a) and a (ps a) according to the case a is pointed or not. *)
+
+inductive pitem (S: DeqSet) : Type[0] ≝
+   pz: pitem S                       (* empty *)
+ | pe: pitem S                       (* epsilon *)
+ | ps: S → pitem S                   (* symbol *)
+ | pp: S → pitem S                   (* pointed symbol *)
+ | pc: pitem S → pitem S → pitem S   (* concatenation *)
+ | po: pitem S → pitem S → pitem S   (* plus *)
+ | pk: pitem S → pitem S.            (* kleene's star *)
+
+(* A pointed regular expression (pre) is just a pointed item with an additional
+boolean, that must be understood as the possibility to have a trailing point at
+the end of the expression. As we shall see, pointed regular expressions can be
+understood as states of a DFA, and the boolean indicates if
+the state is final or not. *)
+
+definition pre ≝ λS.pitem S × bool.
+
+interpretation "pitem star" 'star a = (pk ? a).
+interpretation "pitem or" 'plus a b = (po ? a b).
+interpretation "pitem cat" 'middot a b = (pc ? a b).
+notation < ".a" non associative with precedence 90 for @{ 'pp $a}.
+notation > "`. term 90 a" non associative with precedence 90 for @{ 'pp $a}.
+interpretation "pitem pp" 'pp a = (pp ? a).
+interpretation "pitem ps" 'ps a = (ps ? a).
+interpretation "pitem epsilon" 'epsilon = (pe ?).
+interpretation "pitem empty" 'empty = (pz ?).
+
+(* The carrier $|i|$ of an item i is the regular expression obtained from i by
+removing all the points. Similarly, the carrier of a pointed regular expression
+is the carrier of its item. *)
+
+let rec forget (S: DeqSet) (l : pitem S) on l: re S ≝
+ match l with
+ [ pz ⇒ z ? (* `∅ *)
+ | pe ⇒ ϵ
+ | ps x ⇒ `x
+ | pp x ⇒ `x
+ | pc E1 E2 ⇒ (forget ? E1) · (forget ? E2)
+ | po E1 E2 ⇒ (forget ? E1) + (forget ? E2)
+ | pk E ⇒ (forget ? E)^* ].
+
+(* notation < "|term 19 e|" non associative with precedence 70 for @{'forget $e}.*)
+interpretation "forget" 'norm a = (forget ? a).
+
+lemma erase_dot : ∀S.∀e1,e2:pitem S. |e1 · e2| = c ? (|e1|) (|e2|).
+// qed.
+
+lemma erase_plus : ∀S.∀i1,i2:pitem S.
+ |i1 + i2| = |i1| + |i2|.
+// qed.
+
+lemma erase_star : ∀S.∀i:pitem S.|i^*| = |i|^*.
+// qed.
+
+(*
+Comparing items and pres
+
+Items and pres are very concrete datatypes: they can be effectively compared,
+and enumerated. In particular, we can define a boolean equality beqitem and a proof
+beqitem_true that it reflects propositional equality, enriching the set (pitem S)
+to a DeqSet. *)
+
+let rec beqitem S (i1,i2: pitem S) on i1 ≝
+ match i1 with
+ [ pz ⇒ match i2 with [ pz ⇒ true | _ ⇒ false]
+ | pe ⇒ match i2 with [ pe ⇒ true | _ ⇒ false]
+ | ps y1 ⇒ match i2 with [ ps y2 ⇒ y1==y2 | _ ⇒ false]
+ | pp y1 ⇒ match i2 with [ pp y2 ⇒ y1==y2 | _ ⇒ false]
+ | po i11 i12 ⇒ match i2 with
+ [ po i21 i22 ⇒ beqitem S i11 i21 ∧ beqitem S i12 i22
+ | _ ⇒ false]
+ | pc i11 i12 ⇒ match i2 with
+ [ pc i21 i22 ⇒ beqitem S i11 i21 ∧ beqitem S i12 i22
+ | _ ⇒ false]
+ | pk i11 ⇒ match i2 with [ pk i21 ⇒ beqitem S i11 i21 | _ ⇒ false]
+ ].
+
+lemma beqitem_true: ∀S,i1,i2. iff (beqitem S i1 i2 = true) (i1 = i2).
+#S #i1 elim i1
+ [#i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] % // normalize #H destruct
+ |#i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] % // normalize #H destruct
+ |#x #i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] % normalize #H destruct
+ [>(\P H) // | @(\b (refl …))]
+ |#x #i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] % normalize #H destruct
+ [>(\P H) // | @(\b (refl …))]
+ |#i11 #i12 #Hind1 #Hind2 #i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] %
+ normalize #H destruct
+ [cases (true_or_false (beqitem S i11 i21)) #H1
+ [>(proj1 … (Hind1 i21) H1) >(proj1 … (Hind2 i22)) // >H1 in H; #H @H
+ |>H1 in H; normalize #abs @False_ind /2/
+ ]
+ |>(proj2 … (Hind1 i21) (refl …)) >(proj2 … (Hind2 i22) (refl …)) //
+ ]
+ |#i11 #i12 #Hind1 #Hind2 #i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i3] %
+ normalize #H destruct
+ [cases (true_or_false (beqitem S i11 i21)) #H1
+ [>(proj1 … (Hind1 i21) H1) >(proj1 … (Hind2 i22)) // >H1 in H; #H @H
+ |>H1 in H; normalize #abs @False_ind /2/
+ ]
+ |>(proj2 … (Hind1 i21) (refl …)) >(proj2 … (Hind2 i22) (refl …)) //
+ ]
+ |#i3 #Hind #i2 cases i2 [||#a|#a|#i21 #i22| #i21 #i22|#i4] %
+ normalize #H destruct
+ [>(proj1 … (Hind i4) H) // |>(proj2 … (Hind i4) (refl …)) //]
+ ]
+qed.
+
+definition DeqItem ≝ λS.
+ mk_DeqSet (pitem S) (beqitem S) (beqitem_true S).
+
+(* We also add a couple of unification hints to allow the type inference system
+to look at (pitem S) as the carrier of a DeqSet, and at beqitem as if it was the
+equality function of a DeqSet. *)
+
+unification hint 0 ≔ S;
+ X ≟ mk_DeqSet (pitem S) (beqitem S) (beqitem_true S)
+(* ---------------------------------------- *) ⊢
+ pitem S ≡ carr X.
+
+unification hint 0 ≔ S,i1,i2;
+ X ≟ mk_DeqSet (pitem S) (beqitem S) (beqitem_true S)
+(* ---------------------------------------- *) ⊢
+ beqitem S i1 i2 ≡ eqb X i1 i2.
+
+(*
+Semantics of pointed regular expressions
+
+The intuitive semantic of a point is to mark the position where
+we should start reading the regular expression. The language associated
+to a pre is the union of the languages associated with its points. *)
+
+let rec in_pl (S : DeqSet) (r : pitem S) on r : word S → Prop ≝
+match r with
+[ pz ⇒ ∅
+| pe ⇒ ∅
+| ps _ ⇒ ∅
+| pp x ⇒ { (x::[]) }
+| pc r1 r2 ⇒ (in_pl ? r1) · \sem{forget ? r2} ∪ (in_pl ? r2)
+| po r1 r2 ⇒ (in_pl ? r1) ∪ (in_pl ? r2)
+| pk r1 ⇒ (in_pl ? r1) · \sem{forget ? r1}^* ].
+
+interpretation "in_pl" 'in_l E = (in_pl ? E).
+interpretation "in_pl mem" 'mem w l = (in_pl ? l w).
+
+(* The language of a pre 〈i,b〉 is the language of the item i, plus the
+empty word when the trailing boolean b is true. *)
+definition in_prl ≝ λS : DeqSet.λp:pre S.
+ if (\snd p) then \sem{\fst p} ∪ {ϵ} else \sem{\fst p}.
+
+interpretation "in_prl mem" 'mem w l = (in_prl ? l w).
+interpretation "in_prl" 'in_l E = (in_prl ? E).
+
+(* The following, trivial lemmas are only meant for rewriting purposes. *)
+
+(* Semantics of a pre, by cases on its boolean: *)
+lemma sem_pre_true : ∀S.∀i:pitem S.
+ \sem{〈i,true〉} = \sem{i} ∪ {ϵ}.
+// qed.
+
+lemma sem_pre_false : ∀S.∀i:pitem S.
+ \sem{〈i,false〉} = \sem{i}.
+// qed.
+
+(* Unfolding equations for the pointed semantics of ·, + and ^*: *)
+lemma sem_cat: ∀S.∀i1,i2:pitem S.
+ \sem{i1 · i2} = \sem{i1} · \sem{|i2|} ∪ \sem{i2}.
+// qed.
+
+(* The same equation, applied to a given word w. *)
+lemma sem_cat_w: ∀S.∀i1,i2:pitem S.∀w.
+ \sem{i1 · i2} w = ((\sem{i1} · \sem{|i2|}) w ∨ \sem{i2} w).
+// qed.
+
+lemma sem_plus: ∀S.∀i1,i2:pitem S.
+ \sem{i1 + i2} = \sem{i1} ∪ \sem{i2}.
+// qed.
+
+lemma sem_plus_w: ∀S.∀i1,i2:pitem S.∀w.
+ \sem{i1 + i2} w = (\sem{i1} w ∨ \sem{i2} w).
+// qed.
+
+lemma sem_star : ∀S.∀i:pitem S.
+ \sem{i^*} = \sem{i} · \sem{|i|}^*.
+// qed.
+
+lemma sem_star_w : ∀S.∀i:pitem S.∀w.
+ \sem{i^*} w = (∃w1,w2.w1 @ w2 = w ∧ \sem{i} w1 ∧ \sem{|i|}^* w2).
+// qed.
+
+(* Below are a few, simple, semantic properties of items. In particular:
+- not_epsilon_item : ∀S:DeqSet.∀i:pitem S. ¬ (\sem{i} ϵ).
+- epsilon_pre : ∀S.∀e:pre S. (\sem{e} ϵ) ↔ (\snd e = true).
+- minus_eps_item: ∀S.∀i:pitem S. \sem{i} =1 \sem{i}-{[ ]}.
+- minus_eps_pre: ∀S.∀e:pre S. \sem{\fst e} =1 \sem{e}-{[ ]}.
+The first property is proved by a simple induction on $i$; the other
+results are easy corollaries. We need an auxiliary lemma first. *)
+
+(* If the concatenation of two words is empty, the first one is empty. *)
+lemma append_eq_nil : ∀S.∀w1,w2:word S. w1 @ w2 = ϵ → w1 = ϵ.
+#S #w1 #w2 cases w1 // #a #tl normalize #H destruct qed.
+
+(* No item accepts the empty word (induction on the item). *)
+lemma not_epsilon_lp : ∀S:DeqSet.∀e:pitem S. ¬ (ϵ ∈ e).
+#S #e elim e normalize /2/
+ [#r1 #r2 * #n1 #n2 % * /2/ * #w1 * #w2 * * #H
+ >(append_eq_nil …H…) /2/
+ |#r1 #r2 #n1 #n2 % * /2/
+ |#r #n % * #w1 * #w2 * * #H >(append_eq_nil …H…) /2/
+ ]
+qed.
+
+(* A pre accepting ϵ must have a true boolean … *)
+lemma epsilon_to_true : ∀S.∀e:pre S. ϵ ∈ e → \snd e = true.
+#S * #i #b cases b // normalize #H @False_ind /2/
+qed.
+
+(* … and conversely, a true boolean grants acceptance of ϵ. *)
+lemma true_to_epsilon : ∀S.∀e:pre S. \snd e = true → ϵ ∈ e.
+#S * #i #b #btrue normalize in btrue; >btrue %2 //
+qed.
+
+(* Removing ϵ from the language of an item is the identity, since by
+not_epsilon_lp the language never contains ϵ. *)
+lemma minus_eps_item: ∀S.∀i:pitem S. \sem{i} =1 \sem{i}-{[ ]}.
+#S #i #w %
+ [#H whd % // normalize @(not_to_not … (not_epsilon_lp …i)) //
+ |* //
+ ]
+qed.
+
+(* The item of a pre captures exactly the non-empty part of its language. *)
+lemma minus_eps_pre: ∀S.∀e:pre S. \sem{\fst e} =1 \sem{e}-{[ ]}.
+#S * #i *
+ [>sem_pre_true normalize in ⊢ (??%?); #w %
+ [/3/ | * * // #H1 #H2 @False_ind @(absurd …H1 H2)]
+ |>sem_pre_false normalize in ⊢ (??%?); #w % [ /3/ | * // ]
+ ]
+qed.
\ No newline at end of file
--- /dev/null
+(*
+Broadcasting points
+
+Intuitively, a regular expression e must be understood as a pointed expression with a single
+point in front of it. Since however we only allow points before symbols, we must broadcast
+this initial point inside e traversing all nullable subexpressions, that essentially corresponds
+to the ϵ-closure operation on automata. We use the notation •(_) to denote such an operation;
+its definition is the expected one: let us start discussing an example.
+
+Example
+Let us broadcast a point inside (a + ϵ)(b*a + b)b. We start working in parallel on the
+first occurrence of a (where the point stops), and on ϵ that gets traversed. We have hence
+reached the end of a + ϵ and we must pursue broadcasting inside (b*a + b)b. Again, we work in
+parallel on the two additive subterms b^*a and b; the first point is allowed to both enter the
+star, and to traverse it, stopping in front of a; the second point just stops in front of b.
+No point reached the end of b^*a + b, hence no further propagation is possible. In conclusion:
+ •((a + ϵ)(b^*a + b)b) = 〈(•a + ϵ)((•b)^*•a + •b)b, false〉
+*)
+
+include "tutorial/chapter7.ma".
+
+(* Broadcasting a point inside an item generates a pre, since the point could possibly reach
+the end of the expression.
+Broadcasting inside a i1+i2 amounts to broadcast in parallel inside i1 and i2.
+If we define
+ 〈i1,b1〉 ⊕ 〈i2,b2〉 = 〈i1 + i2, b1∨ b2〉
+then, we just have •(i1+i2) = •(i1)⊕ •(i2).
+*)
+
+(* lo ("⊕") combines two pres componentwise: plus on the items, or on
+the booleans. *)
+definition lo ≝ λS:DeqSet.λa,b:pre S.〈\fst a + \fst b,\snd a ∨ \snd b〉.
+notation "a ⊕ b" left associative with precedence 60 for @{'oplus $a $b}.
+interpretation "oplus" 'oplus a b = (lo ? a b).
+
+(* Unfolding equation for ⊕. *)
+lemma lo_def: ∀S.∀i1,i2:pitem S.∀b1,b2. 〈i1,b1〉⊕〈i2,b2〉=〈i1+i2,b1∨b2〉.
+// qed.
+
+(*
+Concatenation is a bit more complex. In order to broadcast a point inside i1 · i2
+we should start broadcasting it inside i1 and then proceed into i2 if and only if a
+point reached the end of i1. This suggests to define •(i1 · i2) as •(i1) ▹ i2, where
+e ▹ i is a general operation of concatenation between a pre and an item, defined by
+cases on the boolean in e:
+
+ 〈i1,true〉 ▹ i2 = i1 ◃ •(i2)
+ 〈i1,false〉 ▹ i2 = 〈i1 · i2, false〉
+In turn, ◃ says how to concatenate an item with a pre, that is however extremely simple:
+ i1 ◃ 〈i2,b〉 = 〈i1 · i2, b〉
+Let us come to the formalized definitions:
+*)
+
+(* i ◃ e concatenates the item i to the left of the item of the pre e,
+preserving the boolean of e. *)
+definition pre_concat_r ≝ λS:DeqSet.λi:pitem S.λe:pre S.
+ match e with [ mk_Prod i1 b ⇒ 〈i · i1, b〉].
+
+notation "i ◃ e" left associative with precedence 60 for @{'lhd $i $e}.
+interpretation "pre_concat_r" 'lhd i e = (pre_concat_r ? i e).
+
+(* Extensional equality (=1) of languages follows from propositional
+equality. *)
+lemma eq_to_ex_eq: ∀S.∀A,B:word S → Prop.
+ A = B → A =1 B.
+#S #A #B #H >H #x % // qed.
+
+(* The behaviour of ◃ is summarized by the following, easy lemma: *)
+
+lemma sem_pre_concat_r : ∀S,i.∀e:pre S.
+ \sem{i ◃ e} =1 \sem{i} · \sem{|\fst e|} ∪ \sem{e}.
+#S #i * #i1 #b1 cases b1 [2: @eq_to_ex_eq //]
+>sem_pre_true >sem_cat >sem_pre_true /2/
+qed.
+
+(* The definition of $•(-)$ (eclose) and ▹ (pre_concat_l) are mutually recursive.
+In this situation, a viable alternative that is usually simpler to reason about,
+is to abstract one of the two functions with respect to the other. In particular
+we abstract pre_concat_l with respect to an input bcast function from items to
+pres. *)
+
+(* e1 ▹ i2, abstracted over the broadcasting function bcast: if the boolean
+of e1 is true, the point crosses into i2 (via bcast); otherwise the result
+is a plain concatenation with boolean false. *)
+definition pre_concat_l ≝ λS:DeqSet.λbcast:∀S:DeqSet.pitem S → pre S.λe1:pre S.λi2:pitem S.
+ match e1 with
+ [ mk_Prod i1 b1 ⇒ match b1 with
+ [ true ⇒ (i1 ◃ (bcast ? i2))
+ | false ⇒ 〈i1 · i2,false〉
+ ]
+ ].
+
+notation "a ▹ b" left associative with precedence 60 for @{'tril eclose $a $b}.
+interpretation "item-pre concat" 'tril op a b = (pre_concat_l ? op a b).
+
+(* We are ready to give the formal definition of the broadcasting operation. *)
+
+notation "•" non associative with precedence 60 for @{eclose ?}.
+
+(* eclose broadcasts a point inside an item; the boolean of the resulting
+pre records whether some point reached the end of the expression. Note
+that ϵ and a star are traversed (boolean true), while a symbol stops the
+point in front of it (boolean false). *)
+let rec eclose (S: DeqSet) (i: pitem S) on i : pre S ≝
+ match i with
+ [ pz ⇒ 〈 pz ?, false 〉
+ | pe ⇒ 〈 ϵ, true 〉
+ | ps x ⇒ 〈 `.x, false 〉
+ | pp x ⇒ 〈 `.x, false 〉
+ | po i1 i2 ⇒ •i1 ⊕ •i2
+ | pc i1 i2 ⇒ •i1 ▹ i2
+ | pk i ⇒ 〈(\fst (•i))^*,true〉].
+
+notation "• x" non associative with precedence 60 for @{'eclose $x}.
+interpretation "eclose" 'eclose x = (eclose ? x).
+
+(* Here are a few simple properties of ▹ and •(-) *)
+
+(* Unfolding equations for ▹ and •(-): *)
+lemma pcl_true : ∀S.∀i1,i2:pitem S.
+ 〈i1,true〉 ▹ i2 = i1 ◃ (•i2).
+// qed.
+
+(* As pcl_true, with the right-hand side fully unfolded. *)
+lemma pcl_true_bis : ∀S.∀i1,i2:pitem S.
+ 〈i1,true〉 ▹ i2 = 〈i1 · \fst (•i2), \snd (•i2)〉.
+#S #i1 #i2 normalize cases (•i2) // qed.
+
+lemma pcl_false: ∀S.∀i1,i2:pitem S.
+ 〈i1,false〉 ▹ i2 = 〈i1 · i2, false〉.
+// qed.
+
+lemma eclose_plus: ∀S:DeqSet.∀i1,i2:pitem S.
+ •(i1 + i2) = •i1 ⊕ •i2.
+// qed.
+
+lemma eclose_dot: ∀S:DeqSet.∀i1,i2:pitem S.
+ •(i1 · i2) = •i1 ▹ i2.
+// qed.
+
+lemma eclose_star: ∀S:DeqSet.∀i:pitem S.
+ •i^* = 〈(\fst(•i))^*,true〉.
+// qed.
+
+(* The definition of •(-) (eclose) can then be lifted from items to pres
+in the obvious way. *)
+
+(* lift f applies f : pitem → pre to the item of a pre, or-ing the boolean
+produced by f with the original one. *)
+definition lift ≝ λS.λf:pitem S →pre S.λe:pre S.
+ match e with
+ [ mk_Prod i b ⇒ 〈\fst (f i), \snd (f i) ∨ b〉].
+
+(* Broadcasting lifted to pres. *)
+definition preclose ≝ λS. lift S (eclose S).
+interpretation "preclose" 'eclose x = (preclose ? x).
+
+(* Obviously, broadcasting does not change the carrier of the item,
+as it is easily proved by structural induction. *)
+
+(* Broadcasting a point does not change the carrier of the item
+(structural induction, rewriting with the unfolding equations above). *)
+lemma erase_bull : ∀S.∀i:pitem S. |\fst (•i)| = |i|.
+#S #i elim i //
+ [ #i1 #i2 #IH1 #IH2 >erase_dot <IH1 >eclose_dot
+ cases (•i1) #i11 #b1 cases b1 // <IH2 >pcl_true_bis //
+ | #i1 #i2 #IH1 #IH2 >eclose_plus >(erase_plus … i1) <IH1 <IH2
+ cases (•i1) #i11 #b1 cases (•i2) #i21 #b2 //
+ | #i #IH >eclose_star >(erase_star … i) <IH cases (•i) //
+ ]
+qed.
+
+(* We are now ready to state the main semantic properties of ⊕, ◃ and •(-):
+
+sem_oplus: \sem{e1 ⊕ e2} =1 \sem{e1} ∪ \sem{e2}
+sem_pcl: \sem{e1 ▹ i2} =1 \sem{e1} · \sem{|i2|} ∪ \sem{i2}
+sem_bullet \sem{•i} =1 \sem{i} ∪ \sem{|i|}
+
+The proof of sem_oplus is straightforward. *)
+
+(* ⊕ computes the union of the languages of its arguments; the proof is a
+case analysis on the two booleans. *)
+lemma sem_oplus: ∀S:DeqSet.∀e1,e2:pre S.
+ \sem{e1 ⊕ e2} =1 \sem{e1} ∪ \sem{e2}.
+#S * #i1 #b1 * #i2 #b2 #w %
+ [cases b1 cases b2 normalize /2/ * /3/ * /3/
+ |cases b1 cases b2 normalize /2/ * /3/ * /3/
+ ]
+qed.
+
+(* For the others, we proceed as follow: we first prove the following
+auxiliary lemma, that assumes sem_bullet:
+
+sem_pcl_aux:
+ \sem{•i2} =1 \sem{i2} ∪ \sem{|i2|} →
+ \sem{e1 ▹ i2} =1 \sem{e1} · \sem{|i2|} ∪ \sem{i2}.
+
+Then, using the previous result, we prove sem_bullet by induction
+on i. Finally, sem_pcl_aux and sem_bullet give sem_pcl. *)
+
+(* Unfolding of the pointed semantics of a concatenation. *)
+lemma LcatE : ∀S.∀e1,e2:pitem S.
+ \sem{e1 · e2} = \sem{e1} · \sem{|e2|} ∪ \sem{e2}.
+// qed.
+
+(* Semantics of ▹, assuming (hypothesis H) the semantics of •i2; cases on
+the boolean of e1. *)
+lemma sem_pcl_aux : ∀S.∀e1:pre S.∀i2:pitem S.
+ \sem{•i2} =1 \sem{i2} ∪ \sem{|i2|} →
+ \sem{e1 ▹ i2} =1 \sem{e1} · \sem{|i2|} ∪ \sem{i2}.
+#S * #i1 #b1 #i2 cases b1
+ [2:#th >pcl_false >sem_pre_false >sem_pre_false >sem_cat /2/
+ |#H >pcl_true >sem_pre_true @(eqP_trans … (sem_pre_concat_r …))
+ >erase_bull @eqP_trans [|@(eqP_union_l … H)]
+ @eqP_trans [|@eqP_union_l[|@union_comm ]]
+ @eqP_trans [|@eqP_sym @union_assoc ] /3/
+ ]
+qed.
+
+(* In a decomposition \sem{e} =1 \sem{i} ∪ A, subtracting ϵ only affects A,
+since items never accept ϵ (minus_eps_item). *)
+lemma minus_eps_pre_aux: ∀S.∀e:pre S.∀i:pitem S.∀A.
+ \sem{e} =1 \sem{i} ∪ A → \sem{\fst e} =1 \sem{i} ∪ (A - {[ ]}).
+#S #e #i #A #seme
+@eqP_trans [|@minus_eps_pre]
+@eqP_trans [||@eqP_union_r [|@eqP_sym @minus_eps_item]]
+@eqP_trans [||@distribute_substract]
+@eqP_substract_r //
+qed.
+
+(* Main property of broadcasting: the language of •i is the language of i
+plus that of its carrier |i|. Induction on i; the · case uses sem_pcl_aux,
+the + case sem_oplus, the ^* case minus_eps_pre_aux. *)
+theorem sem_bull: ∀S:DeqSet. ∀i:pitem S. \sem{•i} =1 \sem{i} ∪ \sem{|i|}.
+#S #e elim e
+ [#w normalize % [/2/ | * //]
+ |/2/
+ |#x normalize #w % [ /2/ | * [@False_ind | //]]
+ |#x normalize #w % [ /2/ | * // ]
+ |#i1 #i2 #IH1 #IH2 >eclose_dot
+ @eqP_trans [|@sem_pcl_aux //] >sem_cat
+ @eqP_trans
+ [|@eqP_union_r
+ [|@eqP_trans [|@(cat_ext_l … IH1)] @distr_cat_r]]
+ @eqP_trans [|@union_assoc]
+ @eqP_trans [||@eqP_sym @union_assoc]
+ @eqP_union_l //
+ |#i1 #i2 #IH1 #IH2 >eclose_plus
+ @eqP_trans [|@sem_oplus] >sem_plus >erase_plus
+ @eqP_trans [|@(eqP_union_l … IH2)]
+ @eqP_trans [|@eqP_sym @union_assoc]
+ @eqP_trans [||@union_assoc] @eqP_union_r
+ @eqP_trans [||@eqP_sym @union_assoc]
+ @eqP_trans [||@eqP_union_l [|@union_comm]]
+ @eqP_trans [||@union_assoc] /2/
+ |#i #H >sem_pre_true >sem_star >erase_bull >sem_star
+ @eqP_trans [|@eqP_union_r [|@cat_ext_l [|@minus_eps_pre_aux //]]]
+ @eqP_trans [|@eqP_union_r [|@distr_cat_r]]
+ @eqP_trans [|@union_assoc] @eqP_union_l >erase_star
+ @eqP_sym @star_fix_eps
+ ]
+qed.
+
+(*
+Blank item
+
+
+As a corollary of theorem sem_bullet, given a regular expression e, we can easily
+find an item with the same semantics of $e$: it is enough to get an item (blank e)
+having e as carrier and no point, and then broadcast a point in it. The semantics of
+(blank e) is obviously the empty language: from the point of view of the automaton,
+it corresponds with the pit state. *)
+
+(* blank e: the item with carrier e and no point. *)
+let rec blank (S: DeqSet) (i: re S) on i :pitem S ≝
+ match i with
+ [ z ⇒ pz ?
+ | e ⇒ ϵ
+ | s y ⇒ `y
+ | o e1 e2 ⇒ (blank S e1) + (blank S e2)
+ | c e1 e2 ⇒ (blank S e1) · (blank S e2)
+ | k e ⇒ (blank S e)^* ].
+
+(* Erasing blank e gives back e. *)
+lemma forget_blank: ∀S.∀e:re S.|blank S e| = e.
+#S #e elim e normalize //
+qed.
+
+(* The language of a blank item is empty: induction on e, reducing each
+composite case to unions/concatenations with the empty language. *)
+lemma sem_blank: ∀S.∀e:re S.\sem{blank S e} =1 ∅.
+#S #e elim e
+ [1,2:@eq_to_ex_eq //
+ |#s @eq_to_ex_eq //
+ |#e1 #e2 #Hind1 #Hind2 >sem_cat
+ @eqP_trans [||@(union_empty_r … ∅)]
+ @eqP_trans [|@eqP_union_l[|@Hind2]] @eqP_union_r
+ @eqP_trans [||@(cat_empty_l … ?)] @cat_ext_l @Hind1
+ |#e1 #e2 #Hind1 #Hind2 >sem_plus
+ @eqP_trans [||@(union_empty_r … ∅)]
+ @eqP_trans [|@eqP_union_l[|@Hind2]] @eqP_union_r @Hind1
+ |#e #Hind >sem_star
+ @eqP_trans [||@(cat_empty_l … ?)] @cat_ext_l @Hind
+ ]
+qed.
+
+(* Broadcasting a point into blank e yields a pre with the same language
+as e: a direct corollary of sem_bull, forget_blank and sem_blank. *)
+theorem re_embedding: ∀S.∀e:re S.
+ \sem{•(blank S e)} =1 \sem{e}.
+#S #e @eqP_trans [|@sem_bull] >forget_blank
+@eqP_trans [|@eqP_union_r [|@sem_blank]]
+@eqP_trans [|@union_comm] @union_empty_r.
+qed.
+
+(*
+Lifted Operators
+
+
+Plus and bullet have been already lifted from items to pres. We can now
+do a similar job for concatenation ⊙ and Kleene's star ⊛.*)
+
+(* Concatenation lifted to pres: pre_concat_l (instantiated with eclose as
+broadcast function), further lifted so that the boolean of e2 is or-ed in. *)
+definition lifted_cat ≝ λS:DeqSet.λe:pre S.
+ lift S (pre_concat_l S eclose e).
+
+notation "e1 ⊙ e2" left associative with precedence 70 for @{'odot $e1 $e2}.
+
+interpretation "lifted cat" 'odot e1 e2 = (lifted_cat ? e1 e2).
+
+(* Unfolding equations for ⊙, by cases on the left boolean: *)
+lemma odot_true_b : ∀S.∀i1,i2:pitem S.∀b.
+ 〈i1,true〉 ⊙ 〈i2,b〉 = 〈i1 · (\fst (•i2)),\snd (•i2) ∨ b〉.
+#S #i1 #i2 #b normalize in ⊢ (??%?); cases (•i2) //
+qed.
+
+lemma odot_false_b : ∀S.∀i1,i2:pitem S.∀b.
+ 〈i1,false〉 ⊙ 〈i2,b〉 = 〈i1 · i2 ,b〉.
+//
+qed.
+
+(* ⊙ concatenates the carriers of its arguments. *)
+lemma erase_odot:∀S.∀e1,e2:pre S.
+ |\fst (e1 ⊙ e2)| = |\fst e1| · (|\fst e2|).
+#S * #i1 * * #i2 #b2 // >odot_true_b >erase_dot //
+qed.
+
+(* Let us come to the star operation: *)
+
+(* Kleene star lifted to pres: when the boolean is true the point re-enters
+the body of the star (via eclose); the boolean is preserved in both cases. *)
+definition lk ≝ λS:DeqSet.λe:pre S.
+ match e with
+ [ mk_Prod i1 b1 ⇒
+ match b1 with
+ [true ⇒ 〈(\fst (eclose ? i1))^*, true〉
+ |false ⇒ 〈i1^*,false〉
+ ]
+ ].
+
+(* notation < "a \sup ⊛" non associative with precedence 90 for @{'lk $a}.*)
+interpretation "lk" 'lk a = (lk ? a).
+notation "a^⊛" non associative with precedence 90 for @{'lk $a}.
+
+(* Unfolding equations for ⊛, by cases on the boolean: *)
+lemma ostar_true: ∀S.∀i:pitem S.
+ 〈i,true〉^⊛ = 〈(\fst (•i))^*, true〉.
+// qed.
+
+lemma ostar_false: ∀S.∀i:pitem S.
+ 〈i,false〉^⊛ = 〈i^*, false〉.
+// qed.
+
+(* ⊛ stars the carrier. *)
+lemma erase_ostar: ∀S.∀e:pre S.
+ |\fst (e^⊛)| = |\fst e|^*.
+#S * #i * // qed.
+
+(* Semantics of ⊙ when the right boolean is true: the ϵ contributed by
+that boolean is singled out of e1 ▹ i. *)
+lemma sem_odot_true: ∀S:DeqSet.∀e1:pre S.∀i.
+ \sem{e1 ⊙ 〈i,true〉} =1 \sem{e1 ▹ i} ∪ { [ ] }.
+#S #e1 #i
+cut (e1 ⊙ 〈i,true〉 = 〈\fst (e1 ▹ i), \snd(e1 ▹ i) ∨ true〉) [//]
+#H >H cases (e1 ▹ i) #i1 #b1 cases b1
+ [>sem_pre_true @eqP_trans [||@eqP_sym @union_assoc]
+ @eqP_union_l /2/
+ |/2/
+ ]
+qed.
+
+(* With a false right boolean, ⊙ and ▹ coincide. *)
+lemma eq_odot_false: ∀S:DeqSet.∀e1:pre S.∀i.
+ e1 ⊙ 〈i,false〉 = e1 ▹ i.
+#S #e1 #i
+cut (e1 ⊙ 〈i,false〉 = 〈\fst (e1 ▹ i), \snd(e1 ▹ i) ∨ false〉) [//]
+cases (e1 ▹ i) #i1 #b1 cases b1 #H @H
+qed.
+
+(* We conclude this section with the proof of the main semantic properties
+of ⊙ and ⊛. *)
+
+(* Semantics of lifted concatenation; cases on the boolean of e2, using
+sem_pcl_aux (whose hypothesis is discharged by automation via sem_bull). *)
+lemma sem_odot:
+ ∀S.∀e1,e2: pre S. \sem{e1 ⊙ e2} =1 \sem{e1}· \sem{|\fst e2|} ∪ \sem{e2}.
+#S #e1 * #i2 *
+ [>sem_pre_true
+ @eqP_trans [|@sem_odot_true]
+ @eqP_trans [||@union_assoc] @eqP_union_r @sem_pcl_aux //
+ |>sem_pre_false >eq_odot_false @sem_pcl_aux //
+ ]
+qed.
+
+(* Semantics of the lifted star; cases on the boolean of e. *)
+theorem sem_ostar: ∀S.∀e:pre S.
+ \sem{e^⊛} =1 \sem{e} · \sem{|\fst e|}^*.
+#S * #i #b cases b
+ [>sem_pre_true >sem_pre_true >sem_star >erase_bull
+ @eqP_trans [|@eqP_union_r[|@cat_ext_l [|@minus_eps_pre_aux //]]]
+ @eqP_trans [|@eqP_union_r [|@distr_cat_r]]
+ @eqP_trans [||@eqP_sym @distr_cat_r]
+ @eqP_trans [|@union_assoc] @eqP_union_l
+ @eqP_trans [||@eqP_sym @epsilon_cat_l] @eqP_sym @star_fix_eps
+ |>sem_pre_false >sem_pre_false >sem_star /2/
+ ]
+qed.
\ No newline at end of file
--- /dev/null
+(*
+Moves
+We now define the move operation, that corresponds to the advancement of the
+state in response to the processing of an input character a. The intuition is
+clear: we have to look at points inside $e$ preceding the given character a,
+let the point traverse the character, and broadcast it. All other points must
+be removed.
+
+We can give a particularly elegant definition in terms of the
+lifted operators of the previous section:
+*)
+
+include "tutorial/chapter8.ma".
+
+(* move x i advances the points of i along the character x: a point in
+front of the symbol y survives (as boolean true) only when x == y;
+composite items combine the moves of their subitems through the lifted
+operators ⊕, ⊙ and ⊛. *)
+let rec move (S: DeqSet) (x:S) (E: pitem S) on E : pre S ≝
+ match E with
+ [ pz ⇒ 〈 pz S, false 〉
+ | pe ⇒ 〈 ϵ, false 〉
+ | ps y ⇒ 〈 `y, false 〉
+ | pp y ⇒ 〈 `y, x == y 〉
+ | po e1 e2 ⇒ (move ? x e1) ⊕ (move ? x e2)
+ | pc e1 e2 ⇒ (move ? x e1) ⊙ (move ? x e2)
+ | pk e ⇒ (move ? x e)^⊛ ].
+
+(* Unfolding equations for move on +, · and ^*: *)
+lemma move_plus: ∀S:DeqSet.∀x:S.∀i1,i2:pitem S.
+ move S x (i1 + i2) = (move ? x i1) ⊕ (move ? x i2).
+// qed.
+
+lemma move_cat: ∀S:DeqSet.∀x:S.∀i1,i2:pitem S.
+ move S x (i1 · i2) = (move ? x i1) ⊙ (move ? x i2).
+// qed.
+
+lemma move_star: ∀S:DeqSet.∀x:S.∀i:pitem S.
+ move S x i^* = (move ? x i)^⊛.
+// qed.
+
+(*
+Example. Let us consider the item
+
+ (•a + ϵ)((•b)*•a + •b)b
+
+and the two moves w.r.t. the characters a and b.
+For a, we have two possible positions (all other points gets erased); the innermost
+point stops in front of the final b, while the other one broadcast inside (b^*a + b)b,
+so
+
+ move((•a + ϵ)((•b)*•a + •b)b,a) = 〈(a + ϵ)((•b)^*•a + •b)•b, false〉
+
+For b, we have two positions too. The innermost point stops in front of the final b too,
+while the other point reaches the end of b* and must go back through b*a:
+
+ move((•a + ϵ)((•b)*•a + •b)b ,b) = 〈(a + ϵ)((•b)*•a + b)•b, false〉
+
+*)
+
+(* move lifted to pres: the boolean of the input pre is discarded. *)
+definition pmove ≝ λS:DeqSet.λx:S.λe:pre S. move ? x (\fst e).
+
+lemma pmove_def : ∀S:DeqSet.∀x:S.∀i:pitem S.∀b.
+ pmove ? x 〈i,b〉 = move ? x i.
+// qed.
+
+(* Equal lists of words have equal heads. *)
+lemma eq_to_eq_hd: ∀A.∀l1,l2:list A.∀a,b.
+ a::l1 = b::l2 → a = b.
+#A #l1 #l2 #a #b #H destruct //
+qed.
+
+(* Obviously, a move does not change the carrier of the item, as one can easily
+prove by induction on the item. *)
+
+lemma same_kernel: ∀S:DeqSet.∀a:S.∀i:pitem S.
+ |\fst (move ? a i)| = |i|.
+(* Induction on i; the · case rewrites with erase_odot. *)
+#S #a #i elim i //
+ [#i1 #i2 #H1 #H2 >move_cat >erase_odot //
+ |#i1 #i2 #H1 #H2 >move_plus whd in ⊢ (??%%); //
+ ]
+qed.
+
+(* Here is our first, major result, stating the correctness of the
+move operation. The proof is a simple induction on i. *)
+
+(* Correctness of move: w is accepted by move(a,i) iff a::w is accepted
+by i. Induction on i; the point case analyzes a==x, while the · and ^*
+cases combine sem_odot / sem_ostar with deriv_middot. *)
+theorem move_ok:
+ ∀S:DeqSet.∀a:S.∀i:pitem S.∀w: word S.
+ \sem{move ? a i} w ↔ \sem{i} (a::w).
+#S #a #i elim i
+ [normalize /2/
+ |normalize /2/
+ |normalize /2/
+ |normalize #x #w cases (true_or_false (a==x)) #H >H normalize
+ [>(\P H) % [* // #bot @False_ind //| #H1 destruct /2/]
+ |% [@False_ind |#H1 cases (\Pf H) #H2 @H2 destruct //]
+ ]
+ |#i1 #i2 #HI1 #HI2 #w >move_cat
+ @iff_trans[|@sem_odot] >same_kernel >sem_cat_w
+ @iff_trans[||@(iff_or_l … (HI2 w))] @iff_or_r
+ @iff_trans[||@iff_sym @deriv_middot //]
+ @cat_ext_l @HI1
+ |#i1 #i2 #HI1 #HI2 #w >(sem_plus S i1 i2) >move_plus >sem_plus_w
+ @iff_trans[|@sem_oplus]
+ @iff_trans[|@iff_or_l [|@HI2]| @iff_or_r //]
+ |#i1 #HI1 #w >move_star
+ @iff_trans[|@sem_ostar] >same_kernel >sem_star_w
+ @iff_trans[||@iff_sym @deriv_middot //]
+ @cat_ext_l @HI1
+ ]
+qed.
+
+(* The move operation is generalized to strings in the obvious way. *)
+
+notation > "x ↦* E" non associative with precedence 60 for @{moves ? $x $E}.
+
+(* moves iterates move along a word, character by character. *)
+let rec moves (S : DeqSet) w e on w : pre S ≝
+ match w with
+ [ nil ⇒ e
+ | cons x w' ⇒ w' ↦* (move S x (\fst e))].
+
+(* Unfolding equations for moves: *)
+lemma moves_empty: ∀S:DeqSet.∀e:pre S.
+ moves ? [ ] e = e.
+// qed.
+
+lemma moves_cons: ∀S:DeqSet.∀a:S.∀w.∀e:pre S.
+ moves ? (a::w) e = moves ? w (move S a (\fst e)).
+// qed.
+
+(* The last character of the word can also be singled out on the right. *)
+lemma moves_left : ∀S,a,w,e.
+ moves S (w@(a::[])) e = move S a (\fst (moves S w e)).
+#S #a #w elim w // #x #tl #Hind #e >moves_cons >moves_cons //
+qed.
+
+(* For a non-empty word, membership in a pre only depends on its item. *)
+lemma not_epsilon_sem: ∀S:DeqSet.∀a:S.∀w: word S. ∀e:pre S.
+ iff ((a::w) ∈ e) ((a::w) ∈ \fst e).
+#S #a #w * #i #b cases b normalize
+ [% /2/ * // #H destruct |% normalize /2/]
+qed.
+
+(* moves preserves the carrier of the item. *)
+lemma same_kernel_moves: ∀S:DeqSet.∀w.∀e:pre S.
+ |\fst (moves ? w e)| = |\fst e|.
+#S #w elim w //
+qed.
+
+(* Decidability of the semantics: w belongs to \sem{e} iff the pre reached
+from e along w is final (its boolean is true). Induction on w, via move_ok. *)
+theorem decidable_sem: ∀S:DeqSet.∀w: word S. ∀e:pre S.
+ (\snd (moves ? w e) = true) ↔ \sem{e} w.
+#S #w elim w
+ [* #i #b >moves_empty cases b % /2/
+ |#a #w1 #Hind #e >moves_cons
+ @iff_trans [||@iff_sym @not_epsilon_sem]
+ @iff_trans [||@move_ok] @Hind
+ ]
+qed.
+
+(* It is now clear that we can build a DFA D_e for e by taking pre as states,
+and move as transition function; the initial state is •(e) and a state 〈i,b〉 is
+final if and only if b is true. The fact that states in D_e are finite is obvious:
+in fact, their cardinality is at most 2^{n+1} where n is the number of symbols in
+e. This is one of the advantages of pointed regular expressions w.r.t. derivatives,
+whose finite nature only holds after a suitable quotient.
+
+Let us discuss a couple of examples.
+
+Example.
+Below is the DFA associated with the regular expression (ac+bc)*.
+
+DFA for (ac+bc)*
+
+The graphical description of the automaton is the traditional one, with nodes for
+states and labelled arcs for transitions. Unreachable states are not shown.
+Final states are emphasized by a double circle: since a state 〈e,b〉 is final if and
+only if b is true, we may just label nodes with the item.
+The automaton is not minimal: it is easy to see that the two states corresponding to
+the items (a•c +bc)* and (ac+b•c)* are equivalent (a way to prove it is to observe
+that they define the same language!). In fact, an important property of pres e is that
+each state has a clear semantics, given in terms of the specification e and not of the
+behaviour of the automaton. As a consequence, the construction of the automaton is not
+only direct, but also extremely intuitive and locally verifiable.
+
+Let us consider a more complex case.
+
+Example.
+Starting from the regular expression (a+ϵ)(b*a + b)b, we obtain the following automaton.
+
+DFA for (a+ϵ)(b*a + b)b
+
+Remarkably, this DFA is minimal, testifying the small number of states produced by our
+technique (the pair of states 6-8 and 7-9 differ for the fact that 6 and 7
+are final, while 8 and 9 are not).
+
+
+Move to pit
+.
+
+We conclude this chapter with a few properties of the move operations in relation
+with the pit state. *)
+
+(* The pit state associated with an item: blank carrier, boolean false. *)
+definition pit_pre ≝ λS.λi.〈blank S (|i|), false〉.
+
+(* The following function computes the list of characters occurring in a given
+item i. *)
+
+(* occur collects, without duplicates (via unique_append), the characters
+of a regular expression. *)
+let rec occur (S: DeqSet) (i: re S) on i ≝
+ match i with
+ [ z ⇒ [ ]
+ | e ⇒ [ ]
+ | s y ⇒ y::[]
+ | o e1 e2 ⇒ unique_append ? (occur S e1) (occur S e2)
+ | c e1 e2 ⇒ unique_append ? (occur S e1) (occur S e2)
+ | k e ⇒ occur S e].
+
+(* If a symbol a does not occur in i, then move(i,a) gets to the
+pit state. *)
+
+lemma not_occur_to_pit: ∀S,a.∀i:pitem S. memb S a (occur S (|i|)) ≠ true →
+ move S a i = pit_pre S i.
+(* Induction on i; in the binary cases the non-occurrence hypothesis is
+propagated to each subitem through the two unique_append inclusions. *)
+#S #a #i elim i //
+ [#x normalize cases (a==x) normalize // #H @False_ind /2/
+ |#i1 #i2 #Hind1 #Hind2 #H >move_cat
+ >Hind1 [2:@(not_to_not … H) #H1 @sublist_unique_append_l1 //]
+ >Hind2 [2:@(not_to_not … H) #H1 @sublist_unique_append_l2 //] //
+ |#i1 #i2 #Hind1 #Hind2 #H >move_plus
+ >Hind1 [2:@(not_to_not … H) #H1 @sublist_unique_append_l1 //]
+ >Hind2 [2:@(not_to_not … H) #H1 @sublist_unique_append_l2 //] //
+ |#i #Hind #H >move_star >Hind //
+ ]
+qed.
+
+(* We cannot escape from the pit state. *)
+
+(* A single move from the pit state stays in the pit state … *)
+lemma move_pit: ∀S,a,i. move S a (\fst (pit_pre S i)) = pit_pre S i.
+#S #a #i elim i //
+ [#i1 #i2 #Hind1 #Hind2 >move_cat >Hind1 >Hind2 //
+ |#i1 #i2 #Hind1 #Hind2 >move_plus >Hind1 >Hind2 //
+ |#i #Hind >move_star >Hind //
+ ]
+qed.
+
+(* … and so does any sequence of moves. *)
+lemma moves_pit: ∀S,w,i. moves S w (pit_pre S i) = pit_pre S i.
+#S #w #i elim w //
+qed.
+
+(* If any character in w does not occur in i, then moves(i,w) gets
+to the pit state. *)
+
+lemma to_pit: ∀S,w,e. ¬ sublist S w (occur S (|\fst e|)) →
+ moves S w e = pit_pre S (\fst e).
+(* Induction on w, with a case analysis on whether the head character of w
+occurs in the item: if it does, recur on the tail; otherwise
+not_occur_to_pit and moves_pit close the goal. *)
+#S #w elim w
+ [#e * #H @False_ind @H normalize #a #abs @False_ind /2/
+ |#a #tl #Hind #e #H cases (true_or_false (memb S a (occur S (|\fst e|))))
+ [#Htrue >moves_cons whd in ⊢ (???%); <(same_kernel … a)
+ @Hind >same_kernel @(not_to_not … H) #H1 #b #memb cases (orb_true_l … memb)
+ [#H2 >(\P H2) // |#H2 @H1 //]
+ |#Hfalse >moves_cons >not_occur_to_pit // >Hfalse /2/
+ ]
+ ]
+qed.
\ No newline at end of file