X-Git-Url: http://matita.cs.unibo.it/gitweb/?a=blobdiff_plain;f=weblib%2Ftutorial%2Fchapter3.ma;h=20e545e37cff1a6fa90ef11852151c25bebdfa28;hb=7aa41e02e64bd09df253cc4267a44b4f49b16e03;hp=4ce43679d7c498e59b28f075dd2f0c080c1bf7bb;hpb=f4fdd895e9c85986998dea3556dc596a3e548c51;p=helm.git

diff --git a/weblib/tutorial/chapter3.ma b/weblib/tutorial/chapter3.ma
index 4ce43679d..20e545e37 100644
--- a/weblib/tutorial/chapter3.ma
+++ b/weblib/tutorial/chapter3.ma
@@ -1,20 +1,19 @@
-
 include "tutorial/chapter2.ma".
 include "basics/bool.ma".
 
 (* Matita supports polymorphic data types. The most typical case are polymorphic
 lists, parametric in the type of their elements: *)
 
-inductive list (A:Type[0]) : Type[0] :=
+inductive list (A:Type[0]) : Type[0] ≝
 | nil: list A
 | cons: A -> list A -> list A.
 
 (* The type notation list A is the type of all lists with elements of type A: it is
-defined by tow constructors: a polymorphic empty list (nil A) and a cons operation,
+defined by two constructors: a polymorphic empty list (nil A) and a cons operation,
 adding a new head element of type A to a previous list. For instance, (list nat)
 and (list bool) are lists of natural numbers and booleans, respectively. But we can
-also form more complex data typea, like (list (list (nat → nat))), that is a list whose
-elements are lists of functions from natural number to natural numbers.
+also form more complex data types, like (list (list (nat → nat))), that is a list whose
+elements are lists of functions from natural numbers to natural numbers.
 
 Typical elements in (list bool) are for instance,
   nil nat - the empty list of type nat
@@ -77,9 +76,10 @@ proceed by induction on l *)
 lemma append_nil: ∀A.∀l:list A. l @ [] = l.
 #A #l (elim l) normalize // qed.
 
-(* similarly, we can define the two functions head and tail. We should decide what to do in
-case the input list is empty. For tl, it is natural to return the empty list; for hd, we take
-in input a default element d of type A to return in this case. *)
+(* similarly, we can define the two functions head and tail. Since we can only define
+total functions, we should decide what to do in case the input list is empty. For tl, it
+is natural to return the empty list; for hd, we take as input a default element d of type
+A to return in this case. *)
 
 definition head ≝ λA.λl: list A.λd:A.
   match l with [ nil ⇒ d | cons a _ ⇒ a].
@@ -99,36 +99,108 @@ theorem associative_append:
 #A #l1 #l2 #l3 (elim l1) normalize // qed.
 
 (* Problems with the notation *)
+lemma a_append: ∀A.∀a.∀l:list A. (a::[]) @ l = a::l.
+// qed.
+
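For operational intuition, the datatype and the append function shown above can be transcribed almost literally into OCaml, whose built-in [] and :: constructors match the notation used in this chapter. This is only an informal sketch alongside the Matita development, not part of it; the assertions restate append's defining equation on nil and the content of append_nil on a concrete list.

    (* append, written exactly as the Matita definition above,
       on OCaml's built-in lists *)
    let rec append l1 l2 =
      match l1 with
      | [] -> l2
      | x :: tl -> x :: append tl l2

    let () =
      (* append [] l = l is definitional; l @ [] = l is lemma append_nil *)
      assert (append [1; 2] [3] = [1; 2; 3]);
      assert (append [1; 2] [] = [1; 2])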
 theorem append_cons: ∀A.∀a:A.∀l,l1: list A. l@(a::l1) = (l @ (cons ? a [])) @ l1.
-/2/ qed.
+// qed.
+(* Other typical functions over lists are those computing the length
+of a list, and the function returning the nth element *)
 
-theorem nil_to_nil: ∀A.∀l1,l2:list A.
- l1@l2 = [] → l1 = [] ∧ l2 = [].
-#A #l1 #l2 #isnil @(nil_append_elim A l1 l2) /2/
-qed.
+let rec length (A:Type[0]) (l:list A) on l ≝
+match l with
+  [ nil ⇒ O
+  | cons a tl ⇒ S (length A tl)].
+
+let rec nth n (A:Type[0]) (l:list A) (d:A) ≝
+  match n with
+  [O ⇒ head A l d
+  |S m ⇒ nth m A (tail A l) d].
+
+example ex_length: length ? (cons ? O []) = S O.
+normalize // qed.
+
+example ex_nth: nth (S O) ? (cons ? (S O) (cons ? O [])) O = O.
+normalize // qed.
+
+(* Proving that the length of l1@l2 is the sum of the lengths of l1
+and l2 just requires a trivial induction on the first list. *)
+
+ lemma length_add: ∀A.∀l1,l2:list A.
+  length ? (l1@l2) = add (length ? l1) (length ? l2).
+#A #l1 elim l1 normalize // qed.
+
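As an informal behavioural model only (OCaml on built-in lists, not part of the Matita sources), length and nth can be written in the same way; the assertions mirror ex_nth and the statement of length_add on concrete lists, where 99 plays the role of the default element d.

    let rec length = function [] -> 0 | _ :: tl -> 1 + length tl

    (* nth returns the default d when the index runs past the end,
       just as the Matita version does via head/tail with a default *)
    let rec nth n l d =
      match n, l with
      | _, [] -> d
      | 0, x :: _ -> x
      | n, _ :: tl -> nth (n - 1) tl d

    let () =
      assert (nth 1 [1; 0] 99 = 0);
      assert (length ([1; 2] @ [3]) = length [1; 2] + length [3])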
+(* Let us come to a more interesting question. How can we prove that the empty list is
+different from any list with at least one element, that is from any list of the kind (a::l)?
+We start by defining a simple predicate stating whether a list is empty or not. The
+predicate is computed by inspection over the list *)
+
+definition is_nil: ∀A:Type[0].list A → Prop ≝
+λA.λl.match l with [ nil ⇒ l = [] | cons hd tl ⇒ (l ≠ [])].
+
+(* Next we need a simple result about negation: if you wish to prove ¬P you are
+authorized to add P to your hypotheses: *)
-definition not_nil: ∀A:Type[0].list A → Prop ≝
-λA.λl.match l with [ nil ⇒ True| cons hd tl ⇒ False].
+lemma neg_aux : ∀P:Prop. (P → ¬P) → ¬P.
+#P #PtonegP % /3 by absurd/ qed.
-theorem nil_cons:
+theorem diff_cons_nil:
 ∀A:Type[0].∀l:list A.∀a:A. a::l ≠ [].
-#A #l #a @nmk #Heq (change with (not_nil ? (a::l))) >Heq //
-qed.
+#A #l #a @neg_aux #Heq
+(* we start by assuming the new hypothesis Heq of type a::l = [], using neg_aux.
+Next we use the change tactic to pass from the current goal a::l ≠ [] to the expression
+is_nil (a::l), which is convertible with it. *)
+(change with (is_nil ? (a::l)))
+(* Now we rewrite with Heq, obtaining (is_nil A []), which reduces to the trivial
+goal [] = [] *)
+>Heq // qed.
+
+(* As an application of the previous result let us prove that l1@l2 is empty if and
+only if both l1 and l2 are empty. The idea is to proceed by cases on l1: if l1 = [] the
+statement is trivial; on the other hand, if l1 = a::tl, then the hypothesis
+(a::tl)@l2 = [] is absurd, hence we can prove anything from it. When we know we can
+prove both A and ¬A, a sensible way to proceed is to apply False_ind: ∀P.False → P to the
+current goal, which reduces it to proving False, and then absurd: ∀A:Prop. A → ¬A → False
+to reduce to the contradictory cases. Usually, you may invoke automation to take care
+of the absurd case. *)
+
+lemma nil_to_nil: ∀A.∀l1,l2:list A.
+  l1@l2 = [] → l1 = [] ∧ l2 = [].
+#A #l1 cases l1 normalize /2 by conj/ #a #tl #l2 #H @False_ind /2 by absurd/ qed.
 
-(* iterators *)
+(* Let us come to some important, higher order, polymorphic functionals
+acting over lists. A typical example is the map function, taking a function
+f:A → B, a list l = [a1; a2; ... ; an] and returning the list
+[f a1; f a2; ... ; f an]. *)
 
 let rec map (A,B:Type[0]) (f: A → B) (l:list A) on l: list B ≝
   match l with [ nil ⇒ [] | cons x tl ⇒ f x :: (map A B f tl)].
-
+
+(* Another major example is the fold function that, given a list
+l = [a1; a2; ... ; an], a base value b:B, and a function f: A → B → B, returns
+(f a1 (f a2 (... (f an b)...))). *)
+
 let rec foldr (A,B:Type[0]) (f:A → B → B) (b:B) (l:list A) on l :B ≝
-  match l with [ nil ⇒ b | cons a l ⇒ f a (foldr A B f b l)].
-
+  match l with [ nil ⇒ b | cons a l ⇒ f a (foldr A B f b l)].
+
+(* As an example of application of foldr, let us use it to define a filter function
+that, given a list l: list A and a boolean test p: A → bool, returns the sublist of
+elements satisfying the test. In this case, the result type B of foldr should be (list A),
+the base value is [], and f: A → list A → list A is the function that, given x and l,
+returns x::l if x satisfies the test, and l otherwise. We use the if_then_else function
+included from bool.ma for this purpose. *)
+
 definition filter ≝ λT.λp:T → bool.
  foldr T (list T) (λx,l0.if_then_else ? (p x) (x::l0) l0) [].
 
+(* Here are a couple of simple lemmas on the behaviour of the filter function.
+It is often convenient to state such lemmas, in order to be able to use rewriting
+as an alternative to reduction in proofs: reduction is a bit difficult to control.
+*)
+
 lemma filter_true : ∀A,l,a,p. p a = true →
   filter A p (a::l) = a :: filter A p l.
 #A #l #a #p #pa (elim l) normalize >pa // qed.
 
@@ -137,37 +209,67 @@ lemma filter_false : ∀A,l,a,p. p a
   filter A p (a::l) = filter A p l.
 #A #l #a #p #pa (elim l) normalize >pa normalize // qed.
 
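To see concretely how filter arises from foldr, the same construction can be sketched in OCaml (illustrative only; OCaml's if-expression stands in for if_then_else from bool.ma):

    (* foldr f b [a1; a2; ...; an] computes f a1 (f a2 (... (f an b))) *)
    let rec foldr f b = function
      | [] -> b
      | a :: l -> f a (foldr f b l)

    (* filter keeps x exactly when p x holds, as in the Matita definition *)
    let filter p l = foldr (fun x l0 -> if p x then x :: l0 else l0) [] l

    let () = assert (filter (fun n -> n mod 2 = 0) [1; 2; 3; 4] = [2; 4])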
-theorem eq_map : ∀A,B,f,g,l.
- (∀x.f x = g x) → map A B f l = map A B g l.
-#A #B #f #g #l #eqfg (elim l) normalize // qed.
+(* As another example, let us redefine the map function using foldr. The
+result type B is (list B), the base value b is [], and the fold function
+of type A → list B → list B is the function mapping a and l to (f a)::l.
+*)
-(*
-let rec dprodl (A:Type[0]) (f:A→Type[0]) (l1:list A) (g:(∀a:A.list (f a))) on l1 ≝
-match l1 with
-  [ nil ⇒ nil ?
-  | cons a tl ⇒ (map ??(dp ?? a) (g a)) @ dprodl A f tl g
-  ]. *)
+definition map_again ≝ λA,B,f,l. foldr A (list B) (λa,l.f a::l) [] l.
-(**************************** length ******************************)
+(* Can we prove that map_again is "the same" as map? We should first of all
+clarify in which sense we expect the two functions to be equal. Equality in
+Matita has an intensional meaning: it is the smallest predicate induced by
+convertibility, i.e. syntactical equality up to normalization. From an
+intensional point of view, map and map_again are not functions, but programs,
+and they are clearly different. What we would like to say is that the two
+programs behave in the same way: this is a different, extensional equality
+that can be defined in the following way. *)
-let rec length (A:Type[0]) (l:list A) on l ≝
- match l with
- [ nil ⇒ O
- | cons a tl ⇒ S (length A tl)].
+definition ExtEq ≝ λA,B:Type[0].λf,g:A→B.∀a:A.f a = g a.
-let rec nth n (A:Type[0]) (l:list A) (d:A) ≝
- match n with
- [O ⇒ hd A l d
- |S m ⇒ nth m A (tail A l) d].
+(* That map and map_again are extensionally equal in the
+previous sense can be proved by a trivial structural induction on the list *)
-(**************************** fold *******************************)
+lemma eq_maps: ∀A,B,f. ExtEq ?? (map A B f) (map_again A B f).
+#A #B #f #n (elim n) normalize // qed.
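The reconstruction of map from foldr can be replayed in OCaml as well; in this informal sketch List.fold_right plays the role of foldr, and the assertion checks, on one sample list, the pointwise agreement that eq_maps proves for all lists by induction.

    let rec map f = function [] -> [] | x :: tl -> f x :: map f tl

    (* map obtained as a fold, as in the Matita definition of map_again *)
    let map_again f l = List.fold_right (fun a acc -> f a :: acc) l []

    let () =
      let l = [1; 2; 3] in
      assert (map succ l = map_again succ l)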
-let rec fold (A,B:Type[0]) (op:B → B → B) (b:B) (p:A→bool) (f:A→B) (l:list A) on l :B ≝
+(* Let us make another remark about extensional equality. It is clear that,
+if f is extensionally equal to g, then (map A B f) is extensionally equal to
+(map A B g). Let us prove it. *)
+
+theorem eq_map : ∀A,B,f,g. ExtEq A B f g → ExtEq ??
+  (map A B f) (map A B g).
+#A #B #f #g #eqfg
+
+(* the relevant point is that we cannot proceed by rewriting f with g via
+eqfg here. Rewriting only works with Matita intensional equality, while here
+we are dealing with a different predicate, defined by the user. The right way
+to proceed is to unfold the definition of ExtEq, and work by induction on l,
+as usual when we want to prove extensional equality between functions over
+inductive types; again the rest of the proof is trivial. *)
+
+#l (elim l) normalize // qed.
+
+(**************************** BIGOPS *******************************)
+
+(* When building a library of basic functions, it is important to achieve a
+good degree of abstraction and generality, in order to be able to reuse
+suitable instances of the same function in different contexts. This not
+only has the obvious benefit of factorizing code, but it especially avoids
+repeating proofs of generic properties over and over again.
+A really convenient tool is the following combination of fold and filter,
+that essentially allows you to iterate on every subset of a given enumerated
+(finite) type, represented as a list. *)
+
+ let rec fold (A,B:Type[0]) (op:B → B → B) (b:B) (p:A→bool) (f:A→B) (l:list A) on l :B ≝
 match l with
   [ nil ⇒ b | cons a l ⇒ if_then_else ? (p a) (op (f a) (fold A B op b p f l)) (fold A B op b p f l)].
-
-notation "\fold [ op , nil ]_{ ident i ∈ l | p} f"
+
+(* It is also important to spend some time introducing some fancy notation
+for these iterators. *)
+
+ notation "\fold [ op , nil ]_{ ident i ∈ l | p} f"
 with precedence 80
 for @{'fold $op $nil (λ${ident i}. $p) (λ${ident i}. $f) $l}.
@@ -200,15 +302,15 @@ theorem fold_filter:
 qed.
 
 record Aop (A:Type[0]) (nil:A) : Type[0] ≝
-  {op :2> A → A → A;
-   nill:∀a. op nil a = a;
-   nilr:∀a. op a nil = a;
-   assoc: ∀a,b,c.op a (op b c) = op (op a b) c
-  }.
-
-theorem fold_sum: ∀A,B. ∀I,J:list A.∀nil.∀op:Aop B nil.∀f.
-  op (\fold[op,nil]_{i∈I} (f i)) (\fold[op,nil]_{i∈J} (f i)) =
-    \fold[op,nil]_{i∈(I@J)} (f i).
+{op :2> A → A → A;
+  nill:∀a. op nil a = a;
+  nilr:∀a. op a nil = a;
+  assoc: ∀a,b,c.op a (op b c) = op (op a b) c
+}.
+
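As a concrete reading of the fold iterator, the following OCaml sketch (illustrative only) instantiates it with integer addition, that is, with the kind of associative operation and neutral element that an Aop value packages, and computes the sum of the squares of the even elements of a list:

    (* fold op b p f l combines, via op, the values f a for every a in l
       satisfying p, starting from the base value b *)
    let rec fold op b p f = function
      | [] -> b
      | a :: l -> if p a then op (f a) (fold op b p f l) else fold op b p f l

    let () =
      (* \fold[+,0]_{i ∈ [1;2;3;4] | i even} i*i  =  4 + 16  =  20 *)
      assert (fold ( + ) 0 (fun i -> i mod 2 = 0) (fun i -> i * i) [1; 2; 3; 4] = 20)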
+theorem fold_sum: ∀A,B. ∀I,J:list A.∀nil.∀op:Aop B nil.∀f:A → B.
+  op (\fold[op,nil]_{i ∈ I} (f i)) (\fold[op,nil]_{i ∈ J} (f i)) =
+    \fold[op,nil]_{i ∈ (I@J)} (f i).
 #A #B #I #J #nil #op #f (elim I) normalize
-  [>nill //|#a #tl #Hind <assoc //]
+  [>nill//|#a #tl #Hind <assoc //]
 qed.
\ No newline at end of file
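Finally, for intuition about fold_sum, the OCaml sketch of fold given above can be used to check the statement on a small instance, with op = (+), nil = 0 and a trivially true predicate (again an illustration on the side, not part of the development; the definition of fold is repeated so the fragment stands alone):

    let rec fold op b p f = function
      | [] -> b
      | a :: l -> if p a then op (f a) (fold op b p f l) else fold op b p f l

    let () =
      let i, j = [1; 2], [3; 4] in
      let f x = 10 * x and p _ = true in
      (* op (\fold_{i ∈ I} f i) (\fold_{i ∈ J} f i) = \fold_{i ∈ I@J} f i *)
      assert (fold ( + ) 0 p f i + fold ( + ) 0 p f j = fold ( + ) 0 p f (i @ j))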