X-Git-Url: http://matita.cs.unibo.it/gitweb/?a=blobdiff_plain;ds=inline;f=helm%2Fsoftware%2Flambda-delta%2Fsrc%2Fautomath%2FautLexer.mll;fp=helm%2Fsoftware%2Flambda-delta%2Fsrc%2Fautomath%2FautLexer.mll;h=c0bc55afd2bd292ad96031fa3d8259d5ccdfb5b9;hb=39b42ed90bc74c8b6293842f1ac4aca60fc0c37e;hp=cb33d0c3fd414231f2d723aa480eb21769fae566;hpb=bb2a0b22a2c38b59ca664b550f34e5e40e6f04c7;p=helm.git

diff --git a/helm/software/lambda-delta/src/automath/autLexer.mll b/helm/software/lambda-delta/src/automath/autLexer.mll
index cb33d0c3f..c0bc55afd 100644
--- a/helm/software/lambda-delta/src/automath/autLexer.mll
+++ b/helm/software/lambda-delta/src/automath/autLexer.mll
@@ -10,11 +10,11 @@
 
 {
-   module L = Log
-   module O = Options
-   module P = AutParser
+   module L = Log
+   module G = Options
+   module AP = AutParser
 
-   let out s = if !O.debug_lexer then L.warn s else ()
+   let out s = if !G.debug_lexer then L.warn s else ()
 
 (* This turns an Automath identifier into an XML nmtoken *)
    let quote id =
@@ -50,41 +50,41 @@ and token = parse
    | SPC      { token lexbuf }
    | LC       { line_comment lexbuf; token lexbuf }
    | OC       { block_comment lexbuf; token lexbuf }
-   | "_E"     { out "E"; P.E }
-   | "'_E'"   { out "E"; P.E }
-   | "---"    { out "EB"; P.EB }
-   | "'eb'"   { out "EB"; P.EB }
-   | "EB"     { out "EB"; P.EB }
-   | "--"     { out "EXIT"; P.EXIT }
-   | "PN"     { out "PN"; P.PN }
-   | "'pn'"   { out "PN"; P.PN }
-   | "PRIM"   { out "PN"; P.PN }
-   | "'prim'" { out "PN"; P.PN }
-   | "???"    { out "PN"; P.PN }
-   | "PROP"   { out "PROP"; P.PROP }
-   | "'prop'" { out "PROP"; P.PROP }
-   | "TYPE"   { out "TYPE"; P.TYPE }
-   | "'type'" { out "TYPE"; P.TYPE }
+   | "_E"     { out "E"; AP.E }
+   | "'_E'"   { out "E"; AP.E }
+   | "---"    { out "EB"; AP.EB }
+   | "'eb'"   { out "EB"; AP.EB }
+   | "EB"     { out "EB"; AP.EB }
+   | "--"     { out "EXIT"; AP.EXIT }
+   | "PN"     { out "PN"; AP.PN }
+   | "'pn'"   { out "PN"; AP.PN }
+   | "PRIM"   { out "PN"; AP.PN }
+   | "'prim'" { out "PN"; AP.PN }
+   | "???"    { out "PN"; AP.PN }
+   | "PROP"   { out "PROP"; AP.PROP }
+   | "'prop'" { out "PROP"; AP.PROP }
+   | "TYPE"   { out "TYPE"; AP.TYPE }
+   | "'type'" { out "TYPE"; AP.TYPE }
    | ID       { out "ID";
                 let s = Lexing.lexeme lexbuf in
-               if !O.unquote then P.IDENT s else P.IDENT (quote s)
+               if !G.unquote then AP.IDENT s else AP.IDENT (quote s)
              }
-   | ":="     { out "DEF"; P.DEF }
-   | "("      { out "OP"; P.OP }
-   | ")"      { out "CP"; P.CP }
-   | "["      { out "OB"; P.OB }
-   | "]"      { out "CB"; P.CB }
-   | "<"      { out "OA"; P.OA }
-   | ">"      { out "CA"; P.CA }
-   | "@"      { out "AT"; P.AT }
-   | "~"      { out "TD"; P.TD }
-   | "\""     { out "QT"; P.QT }
-   | ":"      { out "CN"; P.CN }
-   | ","      { out "CM"; P.CM }
-   | ";"      { out "SC"; P.SC }
-   | "."      { out "FS"; P.FS }
-   | "+"      { out "PLUS"; P.PLUS }
-   | "-"      { out "MINUS"; P.MINUS }
-   | "*"      { out "TIMES"; P.TIMES }
-   | "="      { out "DEF"; P.DEF }
-   | eof      { out "EOF"; P.EOF }
+   | ":="     { out "DEF"; AP.DEF }
+   | "("      { out "OP"; AP.OP }
+   | ")"      { out "CP"; AP.CP }
+   | "["      { out "OB"; AP.OB }
+   | "]"      { out "CB"; AP.CB }
+   | "<"      { out "OA"; AP.OA }
+   | ">"      { out "CA"; AP.CA }
+   | "@"      { out "AT"; AP.AT }
+   | "~"      { out "TD"; AP.TD }
+   | "\""     { out "QT"; AP.QT }
+   | ":"      { out "CN"; AP.CN }
+   | ","      { out "CM"; AP.CM }
+   | ";"      { out "SC"; AP.SC }
+   | "."      { out "FS"; AP.FS }
+   | "+"      { out "PLUS"; AP.PLUS }
+   | "-"      { out "MINUS"; AP.MINUS }
+   | "*"      { out "TIMES"; AP.TIMES }
+   | "="      { out "DEF"; AP.DEF }
+   | eof      { out "EOF"; AP.EOF }