X-Git-Url: http://matita.cs.unibo.it/gitweb/?a=blobdiff_plain;f=weblib%2Ftutorial%2Fchapter3.ma;h=3d7eb7a56f60b61b6e5a137787c71e94f72316f9;hb=40e98e3e7cd9ae57ee0ef3f58818b6f28f2e456f;hp=f7ad9155715878b850f9441dc9a1df70d4aaa716;hpb=e8e18e32e06c0406c12a873a01eb489a35b828c8;p=helm.git

diff --git a/weblib/tutorial/chapter3.ma b/weblib/tutorial/chapter3.ma
index f7ad91557..3d7eb7a56 100644
--- a/weblib/tutorial/chapter3.ma
+++ b/weblib/tutorial/chapter3.ma
@@ -59,14 +59,14 @@ function.*)
 let rec append A (l1: <a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A) l2 on l1 ≝
   match l1 with
   [ nil ⇒ l2
-  | cons hd tl ⇒ hd <a title="cons" href="cic:/fakeuri.def(1)">:</a>: append A tl l2 ].
+  | cons hd tl ⇒ hd <a title="cons" href="cic:/fakeuri.def(1)">:</a><span class="error" title="Parse error: [sym:] expected after [sym:] (in [term])"></span>: append A tl l2 ].
 
 interpretation "append" 'append l1 l2 = (append ? l1 l2).
 
 (* As usual, the function is executable. For instance, (append A nil l) reduces to
 l, as shown by the following example: *)
 
-example nil_append: ∀A.∀l:<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A. <a title="nil" href="cic:/fakeuri.def(1)">[</a>] <a title="append" href="cic:/fakeuri.def(1)">@</a> l <a title="leibnitz's equality" href="cic:/fakeuri.def(1)">=</a> l.
+example nil_append: ∀A.∀l:<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A. <a title="nil" href="cic:/fakeuri.def(1)">[</a><span class="error" title="Parse error: [term] expected after [sym[] (in [term])"></span>] <a title="append" href="cic:/fakeuri.def(1)">@</a> l <a title="leibnitz's equality" href="cic:/fakeuri.def(1)">=</a> l.
 #A #l normalize // qed.
 
 (* Proving that l @ [] = l is just a bit more complex. The situation is exactly
@@ -74,7 +74,7 @@ the same as for the addition operation of the previous chapter: since append is
 defined by recursion over the first argument, the computation of l @ [] is stuck,
 and we must proceed by induction on l *)
 
-lemma append_nil: ∀A.∀l:<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A.l <a title="append" href="cic:/fakeuri.def(1)">@</a> <a title="nil" href="cic:/fakeuri.def(1)">[</a>] <a title="leibnitz's equality" href="cic:/fakeuri.def(1)">=</a> l.
+lemma append_nil: ∀A.∀l:<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A.l <a title="append" href="cic:/fakeuri.def(1)">@</a><span class="error" title="Parse error: [term level 46] expected after [sym@] (in [term])"></span><span class="error" title="Parse error: [term level 46] expected after [sym@] (in [term])"></span> <a title="nil" href="cic:/fakeuri.def(1)">[</a>] <a title="leibnitz's equality" href="cic:/fakeuri.def(1)">=</a> l.
 #A #l (elim l) normalize // qed.
 
 (* similarly, we can define the two functions head and tail. Since we can only define
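For readability, here is how the code touched by the two hunks above reads once the embedded <a> anchors and <span class="error"> markers are stripped. This is an editor's reconstruction from the hunk text, not a verbatim copy of the repository file:

    let rec append A (l1: list A) l2 on l1 ≝
      match l1 with
      [ nil ⇒ l2
      | cons hd tl ⇒ hd :: append A tl l2 ].

    example nil_append: ∀A.∀l: list A. [] @ l = l.
    #A #l normalize // qed.

    lemma append_nil: ∀A.∀l: list A. l @ [] = l.
    #A #l (elim l) normalize // qed.
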
a href="cic:/matita/tutorial/chapter3/head.def(1)"head/a A (aa title="cons" href="cic:/fakeuri.def(1)":/a:l) d a title="leibnitz's equality" href="cic:/fakeuri.def(1)"=/aspan class="error" title="Parse error: [term] expected after [sym=] (in [term])"/spanspan class="error" title="Parse error: [term] expected after [sym=] (in [term])"/span a. #A #a #d #l normalize // qed. (* Problemi con la notazione *) @@ -123,7 +123,7 @@ let rec nth n (A:Type[0]) (l:a href="cic:/matita/tutorial/chapter3/list.ind(1,0 example ex_length: a href="cic:/matita/tutorial/chapter3/length.fix(0,1,1)"length/a ? (a href="cic:/matita/tutorial/chapter3/list.con(0,2,1)"cons/a ? a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a a title="nil" href="cic:/fakeuri.def(1)"[/a]) a title="leibnitz's equality" href="cic:/fakeuri.def(1)"=/a a href="cic:/matita/tutorial/chapter2/nat.con(0,2,0)"S/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a. normalize // qed. -example ex_nth: a href="cic:/matita/tutorial/chapter3/nth.fix(0,0,2)"nth/a (a href="cic:/matita/tutorial/chapter2/nat.con(0,2,0)"S/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a) ? (a href="cic:/matita/tutorial/chapter3/list.con(0,2,1)"cons/a ? (a href="cic:/matita/tutorial/chapter2/nat.con(0,2,0)"S/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a) (a href="cic:/matita/tutorial/chapter3/list.con(0,2,1)"cons/a ? a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a a title="nil" href="cic:/fakeuri.def(1)"[/a])) a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a a title="leibnitz's equality" href="cic:/fakeuri.def(1)"=/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a. +example ex_nth: a href="cic:/matita/tutorial/chapter3/nth.fix(0,0,2)"nth/a (a href="cic:/matita/tutorial/chapter2/nat.con(0,2,0)"S/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a) ? (a href="cic:/matita/tutorial/chapter3/list.con(0,2,1)"cons/a ? (a href="cic:/matita/tutorial/chapter2/nat.con(0,2,0)"S/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a) (a href="cic:/matita/tutorial/chapter3/list.con(0,2,1)"cons/a ? a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a a title="nil" href="cic:/fakeuri.def(1)"[/aspan class="error" title="Parse error: [term] expected after [sym[] (in [term])"/span])) a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a a title="leibnitz's equality" href="cic:/fakeuri.def(1)"=/a a href="cic:/matita/tutorial/chapter2/nat.con(0,1,0)"O/a. normalize // qed. (* Proving that the length of l1@l2 is the sum of the lengths of l1 @@ -196,7 +196,7 @@ otherwise. We use an if_then_else function included from bool.ma to this purpose definition filter ≝ λT.λp:T → a href="cic:/matita/basics/bool/bool.ind(1,0,0)"bool/a. - a href="cic:/matita/tutorial/chapter3/foldr.fix(0,4,1)"foldr/a T (a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)"list/a T) (λx,l0.a href="cic:/matita/basics/bool/if_then_else.def(1)"if_then_else/a ? (p x) (xa title="cons" href="cic:/fakeuri.def(1)":/a:l0) l0) a title="nil" href="cic:/fakeuri.def(1)"[/a]. + a href="cic:/matita/tutorial/chapter3/foldr.fix(0,4,1)"foldr/a T (a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)"list/a T) (λx,l0. if p x then xa title="cons" href="cic:/fakeuri.def(1)":/a:l0 else l0) a title="nil" href="cic:/fakeuri.def(1)"[/a]. (* Here are a couple of simple lemmas on the behaviour of the filter function. 
@@ -196,7 +196,7 @@ otherwise. We use an if_then_else function included from bool.ma to this purpose
 
 definition filter ≝
   λT.λp:T → <a href="cic:/matita/basics/bool/bool.ind(1,0,0)">bool</a>.
-  <a href="cic:/matita/tutorial/chapter3/foldr.fix(0,4,1)">foldr</a> T (<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> T) (λx,l0.<a href="cic:/matita/basics/bool/if_then_else.def(1)">if_then_else</a> ? (p x) (x<a title="cons" href="cic:/fakeuri.def(1)">:</a>:l0) l0) <a title="nil" href="cic:/fakeuri.def(1)">[</a>].
+  <a href="cic:/matita/tutorial/chapter3/foldr.fix(0,4,1)">foldr</a> T (<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> T) (λx,l0. if p x then x<a title="cons" href="cic:/fakeuri.def(1)">:</a>:l0 else l0) <a title="nil" href="cic:/fakeuri.def(1)">[</a>].
 
 (* Here are a couple of simple lemmas on the behaviour of the filter function.
 It is often convenient to state such lemmas, in order to be able to use rewriting
@@ -265,7 +265,7 @@ that essentially allow you to iterate on every subset of a given enumerated
 let rec fold (A,B:Type[0]) (op:B→B→B) (b:B) (p:A→<a href="cic:/matita/basics/bool/bool.ind(1,0,0)" title="null">bool</a>) (f:A→B) (l:<a href="cic:/matita/tutorial/chapter3/list.ind(1,0,1)">list</a> A) on l:B ≝
  match l with
  [ nil ⇒ b
- | cons a l ⇒ <a href="cic:/matita/basics/bool/if_then_else.def(1)">if_then_else</a> ? (p a) (op (f a) (fold A B op b p f l))
+ | cons a l ⇒ if p a then op (f a) (fold A B op b p f l)
    else (fold A B op b p f l)].
 
 (* It is also important to spend some time introducing some fancy notation
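Stripped of the embedded markup, the new versions of the two definitions changed by these hunks read roughly as follows; the substance of the change is the switch from the if_then_else constant to the if … then … else notation. This is a reconstruction from the + lines above, not a verbatim copy of the file:

    definition filter ≝
      λT.λp:T → bool.
      foldr T (list T) (λx,l0. if p x then x::l0 else l0) [].

    let rec fold (A,B:Type[0]) (op:B→B→B) (b:B) (p:A→bool) (f:A→B) (l:list A) on l:B ≝
      match l with
      [ nil ⇒ b
      | cons a l ⇒ if p a then op (f a) (fold A B op b p f l)
        else (fold A B op b p f l)].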