From 488fb7a872e0cf99f58a97baf6ec2358625d1212 Mon Sep 17 00:00:00 2001 From: Enrico Tassi Date: Thu, 24 Sep 2009 11:50:42 +0000 Subject: [PATCH] .... --- .../matita/nlibrary/topology/convert.awk | 1 + .../matita/nlibrary/topology/grafite.lang | 16 + .../software/matita/nlibrary/topology/igft.ma | 237 +++++++- .../matita/nlibrary/topology/preamble.xml | 20 +- .../matita/nlibrary/topology/sh_gedit.css | 66 +++ .../matita/nlibrary/topology/sh_grafite.js | 71 +++ .../matita/nlibrary/topology/sh_main.js | 538 ++++++++++++++++++ 7 files changed, 922 insertions(+), 27 deletions(-) create mode 100644 helm/software/matita/nlibrary/topology/grafite.lang create mode 100644 helm/software/matita/nlibrary/topology/sh_gedit.css create mode 100644 helm/software/matita/nlibrary/topology/sh_grafite.js create mode 100644 helm/software/matita/nlibrary/topology/sh_main.js diff --git a/helm/software/matita/nlibrary/topology/convert.awk b/helm/software/matita/nlibrary/topology/convert.awk index 68bcf8a62..757872324 100644 --- a/helm/software/matita/nlibrary/topology/convert.awk +++ b/helm/software/matita/nlibrary/topology/convert.awk @@ -9,6 +9,7 @@ BEGIN { match($0, "screenshot *\"([^\"]+)\"", data); key = data[1]; refs[key] = key ".png"; + $0 = gensub(/\(\*\* *screenshot[^*]*\*\)/,"",$0); } # literate programming diff --git a/helm/software/matita/nlibrary/topology/grafite.lang b/helm/software/matita/nlibrary/topology/grafite.lang new file mode 100644 index 000000000..93fabcc0c --- /dev/null +++ b/helm/software/matita/nlibrary/topology/grafite.lang @@ -0,0 +1,16 @@ +preproc = "napply|ncases|nelim|nletin|ncut|nauto|nwhd|nnormalize|nassumption|ngeneralize|nchange|nrewrite|nlapply" + +number = +'\<[+-]?((0x[[:xdigit:]]+)|(([[:digit:]]*\.)? +[[:digit:]]+([eE][+-]?[[:digit:]]+)?))u?((int(?:8|16|32|64))|L)?\>' + +string delim "\"" "\"" escape "\\" + +keyword = "match|with|naxiom|nlemma|ntheorem|nrecord|for|ninductive|ncoinductive|ndefinition|nlet|rec|corec|notation|interpretation|nqed|include" + +comment delim "(*" "*)" multiline nested + +type = '\<(?:CProp|Type|Prop)\>' + +symbol = "[","]","\|","$","@","{","}" + diff --git a/helm/software/matita/nlibrary/topology/igft.ma b/helm/software/matita/nlibrary/topology/igft.ma index 3c97b9c25..1cb932184 100644 --- a/helm/software/matita/nlibrary/topology/igft.ma +++ b/helm/software/matita/nlibrary/topology/igft.ma @@ -12,6 +12,12 @@ the formalization of the paper by S.Berardi and S. Valentini. The tutorial is by Enrico Tassi. +The tutorial spends a considerable amount of effort in defining +notations that resemble the ones used in the original paper. We believe +this a important part of every formalization, not only for the estetic +point of view, but also from the practical point of view. Being +consistent allows to follow the paper in a pedantic way. + Orientering ----------- @@ -40,6 +46,7 @@ some notation attached to them: - A ∪ B `A \cup B` - A ∩ B `A \cap B` +- A ≬ B `A \between B` - x ∈ A `x \in A` - Ω^A, that is the type of the subsets of A, `\Omega ^ A` @@ -219,38 +226,186 @@ as subscript), separating them with a comma followed by a little space. The first (technical) definition -------------------------------- +Before defining the cover relation as an inductive predicate, one +has to notice that the infinity rule uses, in its hypotheses, the +cover relation between two subsets, while the inductive predicate +we are going to define relates an element and a subset. 
+ +An option would be to unfold the definition of cover between subsets, +but we prefer to define the abstract notion of cover between subsets +(so that we can attach a (ambiguous) notation to it). + +Anyway, to ease the understaing of the definition of the cover relation +between subsets, we first define the inductive predicate unfolding the +definition, and we later refine it with. + +DOCEND*) + +ninductive xcover (A : Ax) (U : Ω^A) : A → CProp[0] ≝ +| xcreflexivity : ∀a:A. a ∈ U → xcover A U a +| xcinfinity : ∀a:A.∀i:𝐈 a. (∀y.y ∈ 𝐂 a i → xcover A U y) → xcover A U a. + +(*DOCBEGIN + +We defined the xcover (x will be removed in the final version of the +definition) as an inductive predicate. The arity of the inductive +predicate has to be carefully analyzed: + +> (A : Ax) (U : Ω^A) : A → CProp[0] +The syntax separates with `:` abstractions that are fixed for every +constructor (introduction rule) and abstractions that can change. In that +case the parameter `U` is abstracted once and forall in front of every +constructor, and every occurrence of the inductive predicate is applied to +`U` in a consistent way. Arguments abstracted on the right of `:` are not +constant, for example the xcinfinity constructor introduces `a ◃ U`, +but under the assumption that (for every y) `y ◃ U`. In that rule, the left +had side of the predicate changes, thus it has to be abstrated (in the arity +of the inductive predicate) on the right of `:`. + +DOCEND*) + +(* ncheck xcreflexivity. *) + +(*DOCBEGIN + +We want now to abstract out `(∀y.y ∈ 𝐂 a i → xcover A U y)` and define +a notion `cover_set` to which a notation `𝐂 a i ◃ U` can be attached. + +This notion has to be abstracted over the cover relation (whose +type is the arity of the inductive `xcover` predicate just defined). + +Then it has to be abstracted over the arguments of that cover relation, +i.e. the axiom set and the set U, and the subset (in that case `𝐂 a i`) +sitting on the left hand side of `◃`. DOCEND*) ndefinition cover_set : - ∀c: ∀A:Ax.Ω^A → A → CProp[0]. ∀A:Ax.∀C,U:Ω^A. CProp[0] + ∀cover: ∀A:Ax.Ω^A → A → CProp[0]. ∀A:Ax.∀C,U:Ω^A. CProp[0] ≝ - λc: ∀A:Ax.Ω^A → A → CProp[0]. λA,C,U.∀y.y ∈ C → c A U y. + λcover. λA, C,U. ∀y.y ∈ C → cover A U y. -ndefinition cover_set_interactive : - ∀c: ∀A:Ax.Ω^A → A → CProp[0]. ∀A:Ax.∀C,U:Ω^A. CProp[0]. -#cover; #A; #C; #U; napply (∀y:A.y ∈ C → ?); napply cover; -##[ napply A; -##| napply U; -##| napply y; -##] -nqed. +(*DOCBEGIN + +The `ndefinition` command takes a name, a type and body (of that type). +The type can be omitted, and in that case it is inferred by the system. +If the type is given, the system uses it to infer implicit arguments +of the body. In that case all types are left implicit in the body. + +We now define the notation `a ◃ b`. Here the keywork `hvbox` +and `break` tell the system how to wrap text when it does not +fit the screen (they can be safely ignore for the scope of +this tutorial). we also add an interpretation for that notation, +where the (abstracted) cover relation is implicit. The system +will not be able to infer it from the other arguments `C` and `U` +and will thus prompt the user for it. This is also why we named this +interpretation `covers set temp`: we will later define another +interpretation in which the cover relation is the one we are going to +define. + +DOCEND*) -(* a \ltri b *) notation "hvbox(a break ◃ b)" non associative with precedence 45 for @{ 'covers $a $b }. interpretation "covers set temp" 'covers C U = (cover_set ?? C U). 
+(*DOCBEGIN + +We can now define the cover relation using the `◃` notation for +the premise of infinity. + +DOCEND*) + ninductive cover (A : Ax) (U : Ω^A) : A → CProp[0] ≝ -| creflexivity : ∀a:A. a ∈ U → cover ? U a -| cinfinity : ∀a:A.∀i:𝐈 a. 𝐂 a i ◃ U → cover ? U a. +| creflexivity : ∀a. a ∈ U → cover ? U a +| cinfinity : ∀a. ∀i. 𝐂 a i ◃ U → cover ? U a. +(** screenshot "cover". *) napply cover; nqed. +(*DOCBEGIN + +Note that the system accepts the definition +but prompts the user for the relation the `cover_set` notion is +abstracted on. + +![The system asks for a cover relation][cover] + +The orizontal line separates the hypotheses from the conclusion. +The `napply cover` command tells the system that the relation +it is looking for is exactly our first context entry (i.e. the inductive +predicate we are defining, up to α-conversion); while the `nqed` command +ends a definition or proof. + +We can now define the interpretation for the cover relation between an +element and a subset fist, then between two subsets (but this time +we fixed the relation `cover_set` is abstracted on). + +DOCEND*) + interpretation "covers" 'covers a U = (cover ? U a). -(* interpretation "covers set" 'covers a U = (cover_set cover ? a U). *) +interpretation "covers set" 'covers a U = (cover_set cover ? a U). + +(*DOCBEGIN + +We will proceed similarly for the fish relation, but before going +on it is better to give a short introduction to the proof mode of Matita. +We define again the `cover_set` term, but this time we will build +its body interactively. In λ-calculus Matita is based on, CIC, proofs +and terms share the same syntax, and it thus possible to use the +commands devoted to build proof term to build regular definitions. + +DOCEND*) + + +ndefinition xcover_set : + ∀c: ∀A:Ax.Ω^A → A → CProp[0]. ∀A:Ax.∀C,U:Ω^A. CProp[0]. +(** screenshot "xcover-set-1". *) +(*DOCBEGIN +The system asks for a proof of the full statement, in an empty context. +![xcover_set proof step ][xcover-set-1] +The `#` command in the ∀-introduction rule, it gives a name to an +assumption putting it in the context, and generates a λ-abstraction +in the proof term. +DOCEND*) +#cover; #A; #C; #U; (** screenshot "xcover-set-2". *) +(*DOCBEGIN +![xcover_set proof step ][xcover-set-2] +We have now to provide a proposition, and we exhibit it. We left +a part of it implicit; since the system cannot infer it it will +ask it later. Note that the type of `∀y:A.y ∈ C → ?` is a proposition +whenever `?` is. +DOCEND*) +napply (∀y:A.y ∈ C → ?); (** screenshot "xcover-set-3". *) +(*DOCBEGIN +![xcover_set proof step ][xcover-set-3] +The proposition we want to provide is an application of the +cover relation we have abstracted in the context. The command +`napply`, if the given term has not the expected type (in that +case it is a product versus a proposition) it applies it to as many +implicit arguments as necessary (in that case `? ? ?`). +DOCEND*) +napply cover; (** screenshot "xcover-set-4". *) +(*DOCBEGIN +![xcover_set proof step ][xcover-set-4] +The system will now ask in turn the three implicit arguments +passed to cover. The syntax `##[` allows to start a branching +to tackle every sub proof individually, otherwise every command +is applied to every subrpoof. The command `##|` switches to the next +subproof and `##]` ends the branching. +DOCEND*) +##[ napply A; +##| napply U; +##| napply y; +##] +nqed. + +(*DOCBEGIN +The definition of fish works exactly the same way as for cover, except +that it is defined as a coinductive proposition. 
+DOCEND*) ndefinition fish_set ≝ λf:∀A:Ax.Ω^A → A → CProp[0]. λA,U,V. @@ -270,22 +425,59 @@ nqed. interpretation "fish set" 'fish A U = (fish_set fish ? U A). interpretation "fish" 'fish a U = (fish ? U a). +(*DOCBEGIN + +Matita is able to generate elimination rules for inductive types, +but not introduction rules for the coinductive case. + +DOCEND*) + +(* ncheck cover_rect_CProp0. *) + +(*DOCBEGIN + +We thus have to define the introduction rule for fish by corecursion. +Here we again use the proof mode of Matita to exhibit the body of the +corecursive function. + +DOCEND*) + nlet corec fish_rec (A:Ax) (U: Ω^A) (P: Ω^A) (H1: P ⊆ U) - (H2: ∀a:A. a ∈ P → ∀j: 𝐈 a. 𝐂 a j ≬ P): - ∀a:A. ∀p: a ∈ P. a ⋉ U ≝ ?. -#a; #p; napply cfish; (** screenshot "def-fish-rec". *) -##[ napply H1; napply p; -##| #i; ncases (H2 a p i); #x; *; #xC; #xP; @; ##[napply x] - @; ##[ napply xC ] napply (fish_rec ? U P); nassumption; + (H2: ∀a:A. a ∈ P → ∀j: 𝐈 a. 𝐂 a j ≬ P): ∀a:A. ∀p: a ∈ P. a ⋉ U ≝ ?. +(** screenshot "def-fish-rec-1". *) +#a; #p; napply cfish; +##[ (** screenshot "def-fish-rec-2". *) napply H1; + (** screenshot "def-fish-rec-3". *) napply p; +##| (** screenshot "def-fish-rec-4". *) #i; ncases (H2 a p i); + (** screenshot "def-fish-rec-5". *) #x; *; #xC; #xP; + (** screenshot "def-fish-rec-5". *) @; + ##[ (** screenshot "def-fish-rec-6". *) napply x + ##| (** screenshot "def-fish-rec-7". *) + @; ##[ napply xC; + ##| (** screenshot "def-fish-rec-8". *) + napply (fish_rec ? U P); + nassumption; + ##] + ##] ##] nqed. -notation "◃U" non associative with precedence 55 -for @{ 'coverage $U }. +(*DOCBEGIN +![fish proof step][def-fish-rec-1] +![fish proof step][def-fish-rec-2] +![fish proof step][def-fish-rec-3] +![fish proof step][def-fish-rec-4] +![fish proof step][def-fish-rec-5] +![fish proof step][def-fish-rec-6] +![fish proof step][def-fish-rec-7] +![fish proof step][def-fish-rec-8] +DOCEND*) ndefinition coverage : ∀A:Ax.∀U:Ω^A.Ω^A ≝ λA,U.{ a | a ◃ U }. +notation "◃U" non associative with precedence 55 for @{ 'coverage $U }. + interpretation "coverage cover" 'coverage U = (coverage ? U). ndefinition cover_equation : ∀A:Ax.∀U,X:Ω^A.CProp[0] ≝ λA,U,X. @@ -430,7 +622,6 @@ naxiom setoidification : Bla Bla, -
foo
DOCEND*) diff --git a/helm/software/matita/nlibrary/topology/preamble.xml b/helm/software/matita/nlibrary/topology/preamble.xml index bbba386e1..a85be4fc6 100644 --- a/helm/software/matita/nlibrary/topology/preamble.xml +++ b/helm/software/matita/nlibrary/topology/preamble.xml @@ -4,9 +4,9 @@ + + + + - + diff --git a/helm/software/matita/nlibrary/topology/sh_gedit.css b/helm/software/matita/nlibrary/topology/sh_gedit.css new file mode 100644 index 000000000..c1529dde4 --- /dev/null +++ b/helm/software/matita/nlibrary/topology/sh_gedit.css @@ -0,0 +1,66 @@ +pre.sh_sourceCode { + background-color: white; + color: black; + font-style: normal; + font-weight: normal; +} + +pre.sh_sourceCode .sh_keyword { color: brown; font-weight: bold; } /* language keywords */ +pre.sh_sourceCode .sh_type { color: darkgreen; font-weight: bold; } /* basic types */ +pre.sh_sourceCode .sh_usertype { color: teal; } /* user defined types */ +pre.sh_sourceCode .sh_string { color: magenta; font-family: monospace; } /* strings and chars */ +pre.sh_sourceCode .sh_regexp { color: orange; font-family: monospace; } /* regular expressions */ +pre.sh_sourceCode .sh_specialchar { color: pink; font-family: monospace; } /* e.g., \n, \t, \\ */ +pre.sh_sourceCode .sh_comment { color: brown; font-style: italic; } /* comments */ +pre.sh_sourceCode .sh_number { color: black; } /* literal numbers */ +pre.sh_sourceCode .sh_preproc { color: MediumSeaGreen; } /* e.g., #include, import */ +pre.sh_sourceCode .sh_symbol { color:brown; font-weight: bold; } /* e.g., <, >, + */ +pre.sh_sourceCode .sh_function { color: black; font-weight: bold; } /* function calls and declarations */ +pre.sh_sourceCode .sh_cbracket { color: red; } /* block brackets (e.g., {, }) */ +pre.sh_sourceCode .sh_todo { font-weight: bold; background-color: cyan; } /* TODO and FIXME */ + +/* Predefined variables and functions (for instance glsl) */ +pre.sh_sourceCode .sh_predef_var { color: darkblue; } +pre.sh_sourceCode .sh_predef_func { color: darkblue; font-weight: bold; } + +/* for OOP */ +pre.sh_sourceCode .sh_classname { color: teal; } + +/* line numbers (not yet implemented) */ +pre.sh_sourceCode .sh_linenum { color: black; font-family: monospace; } + +/* Internet related */ +pre.sh_sourceCode .sh_url { color: blue; text-decoration: underline; font-family: monospace; } + +/* for ChangeLog and Log files */ +pre.sh_sourceCode .sh_date { color: blue; font-weight: bold; } +pre.sh_sourceCode .sh_time, pre.sh_sourceCode .sh_file { color: darkblue; font-weight: bold; } +pre.sh_sourceCode .sh_ip, pre.sh_sourceCode .sh_name { color: darkgreen; } + +/* for Prolog, Perl... 
*/ +pre.sh_sourceCode .sh_variable { color: darkgreen; } + +/* for LaTeX */ +pre.sh_sourceCode .sh_italics { color: darkgreen; font-style: italic; } +pre.sh_sourceCode .sh_bold { color: darkgreen; font-weight: bold; } +pre.sh_sourceCode .sh_underline { color: darkgreen; text-decoration: underline; } +pre.sh_sourceCode .sh_fixed { color: green; font-family: monospace; } +pre.sh_sourceCode .sh_argument { color: darkgreen; } +pre.sh_sourceCode .sh_optionalargument { color: purple; } +pre.sh_sourceCode .sh_math { color: orange; } +pre.sh_sourceCode .sh_bibtex { color: blue; } + +/* for diffs */ +pre.sh_sourceCode .sh_oldfile { color: orange; } +pre.sh_sourceCode .sh_newfile { color: darkgreen; } +pre.sh_sourceCode .sh_difflines { color: blue; } + +/* for css */ +pre.sh_sourceCode .sh_selector { color: purple; } +pre.sh_sourceCode .sh_property { color: blue; } +pre.sh_sourceCode .sh_value { color: darkgreen; font-style: italic; } + +/* other */ +pre.sh_sourceCode .sh_section { color: black; font-weight: bold; } +pre.sh_sourceCode .sh_paren { color: red; } +pre.sh_sourceCode .sh_attribute { color: darkgreen; } diff --git a/helm/software/matita/nlibrary/topology/sh_grafite.js b/helm/software/matita/nlibrary/topology/sh_grafite.js new file mode 100644 index 000000000..f327caada --- /dev/null +++ b/helm/software/matita/nlibrary/topology/sh_grafite.js @@ -0,0 +1,71 @@ +if (! this.sh_languages) { + this.sh_languages = {}; +} +sh_languages['grafite'] = [ + [ + [ + /\b(?:napply|ncases|nelim|nletin|ncut|nauto|nwhd|nnormalize|nassumption|ngeneralize|nchange|nrewrite|nlapply)\b/g, + 'sh_preproc', + -1 + ], + [ + /\b[+-]?(?:(?:0x[A-Fa-f0-9]+)|(?:(?:[\d]*\.)?[\d]+(?:[eE][+-]?[\d]+)?))u?(?:(?:int(?:8|16|32|64))|L)?\b/g, + 'sh_number', + -1 + ], + [ + /"/g, + 'sh_string', + 1 + ], + [ + /\b(?:match|with|naxiom|nlemma|ntheorem|nrecord|for|ninductive|ncoinductive|ndefinition|nlet|rec|corec|notation|interpretation|nqed|include)\b/g, + 'sh_keyword', + -1 + ], + [ + /\(\*/g, + 'sh_comment', + 2 + ], + [ + /\b(?:CProp|Type|Prop)\b/g, + 'sh_type', + -1 + ], + [ + /\[|\]|\||\$|@|\{|\}/g, + 'sh_symbol', + -1 + ] + ], + [ + [ + /$/g, + null, + -2 + ], + [ + /\\(?:\\|")/g, + null, + -1 + ], + [ + /"/g, + 'sh_string', + -2 + ] + ], + [ + [ + /\*\)/g, + 'sh_comment', + -2 + ], + [ + /\(\*/g, + 'sh_comment', + 2 + ] + ] +]; diff --git a/helm/software/matita/nlibrary/topology/sh_main.js b/helm/software/matita/nlibrary/topology/sh_main.js new file mode 100644 index 000000000..1fe3ea078 --- /dev/null +++ b/helm/software/matita/nlibrary/topology/sh_main.js @@ -0,0 +1,538 @@ +/* +SHJS - Syntax Highlighting in JavaScript +Copyright (C) 2007, 2008 gnombat@users.sourceforge.net +License: http://shjs.sourceforge.net/doc/gplv3.html +*/ + +if (! 
this.sh_languages) { + this.sh_languages = {}; +} +var sh_requests = {}; + +function sh_isEmailAddress(url) { + if (/^mailto:/.test(url)) { + return false; + } + return url.indexOf('@') !== -1; +} + +function sh_setHref(tags, numTags, inputString) { + var url = inputString.substring(tags[numTags - 2].pos, tags[numTags - 1].pos); + if (url.length >= 2 && url.charAt(0) === '<' && url.charAt(url.length - 1) === '>') { + url = url.substr(1, url.length - 2); + } + if (sh_isEmailAddress(url)) { + url = 'mailto:' + url; + } + tags[numTags - 2].node.href = url; +} + +/* +Konqueror has a bug where the regular expression /$/g will not match at the end +of a line more than once: + + var regex = /$/g; + var match; + + var line = '1234567890'; + regex.lastIndex = 10; + match = regex.exec(line); + + var line2 = 'abcde'; + regex.lastIndex = 5; + match = regex.exec(line2); // fails +*/ +function sh_konquerorExec(s) { + var result = ['']; + result.index = s.length; + result.input = s; + return result; +} + +/** +Highlights all elements containing source code in a text string. The return +value is an array of objects, each representing an HTML start or end tag. Each +object has a property named pos, which is an integer representing the text +offset of the tag. Every start tag also has a property named node, which is the +DOM element started by the tag. End tags do not have this property. +@param inputString a text string +@param language a language definition object +@return an array of tag objects +*/ +function sh_highlightString(inputString, language) { + if (/Konqueror/.test(navigator.userAgent)) { + if (! language.konquered) { + for (var s = 0; s < language.length; s++) { + for (var p = 0; p < language[s].length; p++) { + var r = language[s][p][0]; + if (r.source === '$') { + r.exec = sh_konquerorExec; + } + } + } + language.konquered = true; + } + } + + var a = document.createElement('a'); + var span = document.createElement('span'); + + // the result + var tags = []; + var numTags = 0; + + // each element is a pattern object from language + var patternStack = []; + + // the current position within inputString + var pos = 0; + + // the name of the current style, or null if there is no current style + var currentStyle = null; + + var output = function(s, style) { + var length = s.length; + // this is more than just an optimization - we don't want to output empty elements + if (length === 0) { + return; + } + if (! style) { + var stackLength = patternStack.length; + if (stackLength !== 0) { + var pattern = patternStack[stackLength - 1]; + // check whether this is a state or an environment + if (! 
pattern[3]) { + // it's not a state - it's an environment; use the style for this environment + style = pattern[1]; + } + } + } + if (currentStyle !== style) { + if (currentStyle) { + tags[numTags++] = {pos: pos}; + if (currentStyle === 'sh_url') { + sh_setHref(tags, numTags, inputString); + } + } + if (style) { + var clone; + if (style === 'sh_url') { + clone = a.cloneNode(false); + } + else { + clone = span.cloneNode(false); + } + clone.className = style; + tags[numTags++] = {node: clone, pos: pos}; + } + } + pos += length; + currentStyle = style; + }; + + var endOfLinePattern = /\r\n|\r|\n/g; + endOfLinePattern.lastIndex = 0; + var inputStringLength = inputString.length; + while (pos < inputStringLength) { + var start = pos; + var end; + var startOfNextLine; + var endOfLineMatch = endOfLinePattern.exec(inputString); + if (endOfLineMatch === null) { + end = inputStringLength; + startOfNextLine = inputStringLength; + } + else { + end = endOfLineMatch.index; + startOfNextLine = endOfLinePattern.lastIndex; + } + + var line = inputString.substring(start, end); + + var matchCache = []; + for (;;) { + var posWithinLine = pos - start; + + var stateIndex; + var stackLength = patternStack.length; + if (stackLength === 0) { + stateIndex = 0; + } + else { + // get the next state + stateIndex = patternStack[stackLength - 1][2]; + } + + var state = language[stateIndex]; + var numPatterns = state.length; + var mc = matchCache[stateIndex]; + if (! mc) { + mc = matchCache[stateIndex] = []; + } + var bestMatch = null; + var bestPatternIndex = -1; + for (var i = 0; i < numPatterns; i++) { + var match; + if (i < mc.length && (mc[i] === null || posWithinLine <= mc[i].index)) { + match = mc[i]; + } + else { + var regex = state[i][0]; + regex.lastIndex = posWithinLine; + match = regex.exec(line); + mc[i] = match; + } + if (match !== null && (bestMatch === null || match.index < bestMatch.index)) { + bestMatch = match; + bestPatternIndex = i; + if (match.index === posWithinLine) { + break; + } + } + } + + if (bestMatch === null) { + output(line.substring(posWithinLine), null); + break; + } + else { + // got a match + if (bestMatch.index > posWithinLine) { + output(line.substring(posWithinLine, bestMatch.index), null); + } + + var pattern = state[bestPatternIndex]; + + var newStyle = pattern[1]; + var matchedString; + if (newStyle instanceof Array) { + for (var subexpression = 0; subexpression < newStyle.length; subexpression++) { + matchedString = bestMatch[subexpression + 1]; + output(matchedString, newStyle[subexpression]); + } + } + else { + matchedString = bestMatch[0]; + output(matchedString, newStyle); + } + + switch (pattern[2]) { + case -1: + // do nothing + break; + case -2: + // exit + patternStack.pop(); + break; + case -3: + // exitall + patternStack.length = 0; + break; + default: + // this was the start of a delimited pattern or a state/environment + patternStack.push(pattern); + break; + } + } + } + + // end of the line + if (currentStyle) { + tags[numTags++] = {pos: pos}; + if (currentStyle === 'sh_url') { + sh_setHref(tags, numTags, inputString); + } + currentStyle = null; + } + pos = startOfNextLine; + } + + return tags; +} + +//////////////////////////////////////////////////////////////////////////////// +// DOM-dependent functions + +function sh_getClasses(element) { + var result = []; + var htmlClass = element.className; + if (htmlClass && htmlClass.length > 0) { + var htmlClasses = htmlClass.split(' '); + for (var i = 0; i < htmlClasses.length; i++) { + if (htmlClasses[i].length > 0) { + 
result.push(htmlClasses[i]); + } + } + } + return result; +} + +function sh_addClass(element, name) { + var htmlClasses = sh_getClasses(element); + for (var i = 0; i < htmlClasses.length; i++) { + if (name.toLowerCase() === htmlClasses[i].toLowerCase()) { + return; + } + } + htmlClasses.push(name); + element.className = htmlClasses.join(' '); +} + +/** +Extracts the tags from an HTML DOM NodeList. +@param nodeList a DOM NodeList +@param result an object with text, tags and pos properties +*/ +function sh_extractTagsFromNodeList(nodeList, result) { + var length = nodeList.length; + for (var i = 0; i < length; i++) { + var node = nodeList.item(i); + switch (node.nodeType) { + case 1: + if (node.nodeName.toLowerCase() === 'br') { + var terminator; + if (/MSIE/.test(navigator.userAgent)) { + terminator = '\r'; + } + else { + terminator = '\n'; + } + result.text.push(terminator); + result.pos++; + } + else { + result.tags.push({node: node.cloneNode(false), pos: result.pos}); + sh_extractTagsFromNodeList(node.childNodes, result); + result.tags.push({pos: result.pos}); + } + break; + case 3: + case 4: + result.text.push(node.data); + result.pos += node.length; + break; + } + } +} + +/** +Extracts the tags from the text of an HTML element. The extracted tags will be +returned as an array of tag objects. See sh_highlightString for the format of +the tag objects. +@param element a DOM element +@param tags an empty array; the extracted tag objects will be returned in it +@return the text of the element +@see sh_highlightString +*/ +function sh_extractTags(element, tags) { + var result = {}; + result.text = []; + result.tags = tags; + result.pos = 0; + sh_extractTagsFromNodeList(element.childNodes, result); + return result.text.join(''); +} + +/** +Merges the original tags from an element with the tags produced by highlighting. +@param originalTags an array containing the original tags +@param highlightTags an array containing the highlighting tags - these must not overlap +@result an array containing the merged tags +*/ +function sh_mergeTags(originalTags, highlightTags) { + var numOriginalTags = originalTags.length; + if (numOriginalTags === 0) { + return highlightTags; + } + + var numHighlightTags = highlightTags.length; + if (numHighlightTags === 0) { + return originalTags; + } + + var result = []; + var originalIndex = 0; + var highlightIndex = 0; + + while (originalIndex < numOriginalTags && highlightIndex < numHighlightTags) { + var originalTag = originalTags[originalIndex]; + var highlightTag = highlightTags[highlightIndex]; + + if (originalTag.pos <= highlightTag.pos) { + result.push(originalTag); + originalIndex++; + } + else { + result.push(highlightTag); + if (highlightTags[highlightIndex + 1].pos <= originalTag.pos) { + highlightIndex++; + result.push(highlightTags[highlightIndex]); + highlightIndex++; + } + else { + // new end tag + result.push({pos: originalTag.pos}); + + // new start tag + highlightTags[highlightIndex] = {node: highlightTag.node.cloneNode(false), pos: originalTag.pos}; + } + } + } + + while (originalIndex < numOriginalTags) { + result.push(originalTags[originalIndex]); + originalIndex++; + } + + while (highlightIndex < numHighlightTags) { + result.push(highlightTags[highlightIndex]); + highlightIndex++; + } + + return result; +} + +/** +Inserts tags into text. 
+@param tags an array of tag objects +@param text a string representing the text +@return a DOM DocumentFragment representing the resulting HTML +*/ +function sh_insertTags(tags, text) { + var doc = document; + + var result = document.createDocumentFragment(); + var tagIndex = 0; + var numTags = tags.length; + var textPos = 0; + var textLength = text.length; + var currentNode = result; + + // output one tag or text node every iteration + while (textPos < textLength || tagIndex < numTags) { + var tag; + var tagPos; + if (tagIndex < numTags) { + tag = tags[tagIndex]; + tagPos = tag.pos; + } + else { + tagPos = textLength; + } + + if (tagPos <= textPos) { + // output the tag + if (tag.node) { + // start tag + var newNode = tag.node; + currentNode.appendChild(newNode); + currentNode = newNode; + } + else { + // end tag + currentNode = currentNode.parentNode; + } + tagIndex++; + } + else { + // output text + currentNode.appendChild(doc.createTextNode(text.substring(textPos, tagPos))); + textPos = tagPos; + } + } + + return result; +} + +/** +Highlights an element containing source code. Upon completion of this function, +the element will have been placed in the "sh_sourceCode" class. +@param element a DOM
<pre> element containing the source code to be highlighted
+@param  language  a language definition object
+*/
+function sh_highlightElement(element, language) {
+  sh_addClass(element, 'sh_sourceCode');
+  var originalTags = [];
+  var inputString = sh_extractTags(element, originalTags);
+  var highlightTags = sh_highlightString(inputString, language);
+  var tags = sh_mergeTags(originalTags, highlightTags);
+  var documentFragment = sh_insertTags(tags, inputString);
+  while (element.hasChildNodes()) {
+    element.removeChild(element.firstChild);
+  }
+  element.appendChild(documentFragment);
+}
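+
+// Usage sketch (not part of the original source): highlight a single element
+// with a language definition that is already registered, e.g. the 'grafite'
+// definition installed by sh_grafite.js. The element id here is illustrative.
+//
+//   sh_highlightElement(document.getElementById('igft'), sh_languages['grafite']);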
+
+function sh_getXMLHttpRequest() {
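+  // Prefer the ActiveX object on old Internet Explorer; otherwise use the
+  // standard XMLHttpRequest constructor.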
+  if (window.ActiveXObject) {
+    return new ActiveXObject('Msxml2.XMLHTTP');
+  }
+  else if (window.XMLHttpRequest) {
+    return new XMLHttpRequest();
+  }
+  throw 'No XMLHttpRequest implementation available';
+}
+
+function sh_load(language, element, prefix, suffix) {
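+  // If a request for this language is already in flight, just queue the element;
+  // it will be highlighted when that pending request completes.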
+  if (language in sh_requests) {
+    sh_requests[language].push(element);
+    return;
+  }
+  sh_requests[language] = [element];
+  var request = sh_getXMLHttpRequest();
+  var url = prefix + 'sh_' + language + suffix;
+  request.open('GET', url, true);
+  request.onreadystatechange = function () {
+    if (request.readyState === 4) {
+      try {
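+        // A status of 0 can occur for local file:// access, so a missing status
+        // is treated like HTTP 200.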
+        if (! request.status || request.status === 200) {
+          eval(request.responseText);
+          var elements = sh_requests[language];
+          for (var i = 0; i < elements.length; i++) {
+            sh_highlightElement(elements[i], sh_languages[language]);
+          }
+        }
+        else {
+          throw 'HTTP error: status ' + request.status;
+        }
+      }
+      finally {
+        request = null;
+      }
+    }
+  };
+  request.send(null);
+}
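+
+// Usage sketch (illustrative, not in the original source): fetch, eval and apply
+// a language definition on demand. The 'js/lang/' prefix and '.js' suffix are
+// assumptions about where the sh_*.js language files are served from.
+//
+//   sh_load('grafite', document.getElementsByTagName('pre')[0], 'js/lang/', '.js');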
+
+/**
+Highlights all elements containing source code on the current page. Elements
+containing source code must be "pre" elements with a "class" attribute of
+"sh_LANGUAGE", where LANGUAGE is a valid language identifier; e.g., "sh_java"
+identifies the element as containing "java" language source code.
+*/
+function sh_highlightDocument(prefix, suffix) {
+  var nodeList = document.getElementsByTagName('pre');
+  for (var i = 0; i < nodeList.length; i++) {
+    var element = nodeList.item(i);
+    var htmlClasses = sh_getClasses(element);
+    for (var j = 0; j < htmlClasses.length; j++) {
+      var htmlClass = htmlClasses[j].toLowerCase();
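+      // 'sh_sourcecode' is the marker class added by sh_highlightElement itself,
+      // not a language name, so it is skipped.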
+      if (htmlClass === 'sh_sourcecode') {
+        continue;
+      }
+      if (htmlClass.substr(0, 3) === 'sh_') {
+        var language = htmlClass.substring(3);
+        if (language in sh_languages) {
+          sh_highlightElement(element, sh_languages[language]);
+        }
+        else if (typeof(prefix) === 'string' && typeof(suffix) === 'string') {
+          sh_load(language, element, prefix, suffix);
+        }
+        else {
+          throw 'Found <pre> element with class="' + htmlClass + '", but no such language exists';
+        }
+        break;
+      }
+    }
+  }
+}
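+
+// Usage sketch (illustrative): highlight every pre element whose class names a
+// language, loading any missing definition from prefix + 'sh_' + language + suffix.
+// The 'js/' prefix and '.js' suffix below are assumptions, not library defaults.
+//
+//   window.onload = function () {
+//     sh_highlightDocument('js/', '.js');
+//   };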
-- 
2.39.2