-- Propositional core: implication and negation, the three Lukasiewicz
-- axioms, and modus ponens.
-- NOTE(review): the copies of this file quoted later in the thread begin
-- with the sort declaration below; it was dropped from this copy, so it is
-- restored here (without it, wff is undeclared).
nonempty sort wff;
var ph ps ch: wff*;
term wi (ph ps: wff): wff; infixr wi: -> 25;
term wn (ph: wff): wff; prefix wn: ~ 40;
axiom ax-1: << ph -> ps -> ph >>
(wi ph (wi ps ph));
axiom ax-2: << (ph -> ps -> ch) -> (ph -> ps) -> ph -> ch >>
(wi (wi ph (wi ps ch)) (wi (wi ph ps) (wi ph ch)));
axiom ax-3: << (~ph -> ~ps) -> ps -> ph >>
(wi (wi (wn ph) (wn ps)) (wi ps ph));
-- Hypotheses are written << hyp >> s-expr ->, chained before the conclusion.
axiom ax-mp:
<< ph >> ph ->
<< ph -> ps >> (wi ph ps) ->
<< ps >> ps;
theorem a1i: << ph >> ph -> << ps -> ph >> (wi ps ph);
-- Biconditional (iff), defined from -> and ~.
-- The {} block gives "limited lifetime unfolding": bi1/bi2/bi3 are proven
-- with wb unfolded, and once the block closes they are the only access to
-- the definition.
def wb (ph ps: wff): wff :=
<< ~((ph -> ps) -> ~(ps -> ph)) >>
(wn (wi (wi ph ps) (wn (wi ps ph)))) {
-- notation: <-> as an infix at precedence 20
infix wb: <-> 20;
theorem bi1: << (ph <-> ps) -> ph -> ps >>
(wi (wb ph ps) (wi ph ps));
theorem bi2: << (ph <-> ps) -> ps -> ph >>
(wi (wb ph ps) (wi ps ph));
theorem bi3: << (ph -> ps) -> (ps -> ph) -> (ph <-> ps) >>
(wi (wi ph ps) (wi (wi ps ph) (wb ph ps)));
}
-- Conjunction, defined from -> and ~.
-- Fix: df-an's statement is a biconditional (<->), so its s-expression must
-- use wb at the root, not wi, to match the displayed notation.
def wa (ph ps: wff): wff := << ~(ph -> ~ps) >> (wn (wi ph (wn ps))) {
infix wa: /\ 20;
theorem df-an: << (ph /\ ps) <-> ~(ph -> ~ps) >>
(wb (wa ph ps) (wn (wi ph (wn ps))));
}
-- Truth constant: << p <-> p >> for a bound (dummy) wff variable p.
def wtru (bound p: wff): wff := << p <-> p >> (wb p p) {
notation wtru := << T. >>;
theorem df-tru: << T. <-> (ph <-> ph) >> (wb wtru (wb ph ph));
}
-- Predicate calculus: individual (set) variables and the quantifiers.
pure nonempty sort set;
bound x y z w: set;
-- (ph: wff x) means the wff argument may depend on the bound variable x.
term wal (x: set) (ph: wff x): wff; prefix wal: A. 30;
-- Existential quantifier, defined from A. and negation.
def wex (x: set) (ph: wff x): wff :=
<< ~A. x ~ph >> (wn (wal x (wn ph))) {
prefix wex: E. 30;
theorem df-ex: << E. x ph <-> ~A. x ~ph >>
(wb (wex x ph) (wn (wal x (wn ph))));
}
axiom ax-gen: << ph >> ph -> << A. x ph >> (wal x ph);
axiom ax-4: << A. x (ph -> ps) -> A. x ph -> A. x ps >>
(wi (wal x (wi ph ps)) (wi (wal x ph) (wal x ps)));
-- (ph: wff), with no x, restricts ph so that it may not depend on x.
axiom ax-5 (ph: wff): << ph -> A. x ph >> (wi ph (wal x ph));
-- Free set variables and the primitive equality / membership predicates.
var a b c: set;
term weq (a b: set): wff;
term wel (a b: set): wff;
{
local infix weq: = 50;
local infix wel: e. 40;
-- Unique existence. Two fixes to the draft:
--  * the E! notation must attach to weu (E. is already wex's notation);
--  * the characterizing theorem duplicated the name df-ex from the wex
--    block, so it is renamed df-eu, as in the corrected version quoted
--    later in the thread.
def weu (x: set) (ph: wff x): wff :=
<< E. y A. x (ph <-> x = y) >>
(wex y (wal x (wb ph (weq x y)))) {
prefix weu: E! 30;
theorem df-eu: << E! x ph <-> E. y A. x (ph <-> x = y) >>
(wb (weu x ph) (wex y (wal x (wb ph (weq x y)))));
}
-- Equality and membership axioms, quantifier axioms, and extensionality.
axiom ax-6: << E. x x = a >> (wex x (weq x a));
axiom ax-7: << a = b -> a = c -> b = c >>
(wi (weq a b) (wi (weq a c) (weq b c)));
axiom ax-8: << a = b -> a e. c -> b e. c >>
(wi (weq a b) (wi (wel a c) (wel b c)));
axiom ax-9: << a = b -> c e. a -> c e. b >>
(wi (weq a b) (wi (wel c a) (wel c b)));
-- Fix: the antecedent of ax-10 is ~A. x ph, so the s-expression needs a wn
-- around the first (wal x ph) to match the displayed notation.
axiom ax-10: << ~A. x ph -> A. x ~ A. x ph >>
(wi (wn (wal x ph)) (wal x (wn (wal x ph))));
axiom ax-11: << A. x A. y ph -> A. y A. x ph >>
(wi (wal x (wal y ph)) (wal y (wal x ph)));
-- Fix: the s-expression (which the thread says is authoritative) carries the
-- antecedent x = y, matching the usual statement of ax-12; the displayed
-- notation was missing it, so it is added here.
axiom ax-12 (ph: wff y): << x = y -> A. y ph -> A. x (x = y -> ph) >>
(wi (weq x y) (wi (wal y ph) (wal x (wi (weq x y) ph))));
axiom ax-ext: << A. x (x e. a <-> x e. b) -> a = b >>
(wi (wal x (wb (wel x a) (wel x b))) (weq a b));
-- Replacement, power set, and union, in primitive notation.
-- (ph: wff y z) means ph may depend on the bound variables y and z.
axiom ax-rep (ph: wff y z):
<< A. y E. x A. z (ph -> z = x) ->
E. x A. z (z e. x <-> E. y (y e. a /\ ph)) >>
(wi (wal y (wex x (wal z (wi ph (weq z x)))))
(wex x (wal z (wb (wel z x) (wex y (wa (wel y a) ph))))));
axiom ax-pow: << E. x A. y (A. z (z e. y -> z e. a) -> y e. x) >>
(wex x (wal y (wi (wal z (wi (wel z y) (wel z a))) (wel y x))));
axiom ax-un: << E. x A. y (E. z (y e. z /\ z e. a) -> y e. x) >>
(wex x (wal y (wi (wex z (wa (wel y z) (wel z a))) (wel y x))));
-- Regularity (foundation).
-- Fix: the draft had (wel x z), which does not match the displayed
-- << x e. a >> (z is not even mentioned in the statement); it must be
-- (wel x a).
axiom ax-reg:
<< E. x x e. a -> E. x (x e. a /\ A. y (y e. x -> ~ y e. a)) >>
(wi (wex x (wel x a))
(wex x (wa (wel x a) (wal y (wi (wel y x) (wn (wel y a)))))));
-- Infinity and choice. Note /\ associates to the left in the choice axiom:
-- (w e. x /\ y e. w /\ z e. w) is (wa (wa ...) ...).
axiom ax-inf:
<< E. x (a e. x /\ A. y (y e. x -> E. z (y e. z /\ z e. x))) >>
(wex x (wa (wel a x) (wal y (wi (wel y x)
(wex z (wa (wel y z) (wel z x)))))));
axiom ax-ac:
<< E. x A. y (y e. a -> E. z z e. y ->
E! z (z e. y /\ E. w (w e. x /\ y e. w /\ z e. w))) >>
(wex x (wal y (wi (wel y a) (wi (wex z (wel z y))
(weu z (wa (wel z y) (wex w
(wa (wa (wel w x) (wel y w)) (wel z w)))))))));
}
-- Classes: class abstraction {x | ph} and a separate primitive membership
-- predicate welc between a set and a class.
nonempty sort class;
var A B: class*;
term cab (x: set) (ph: wff x): class;
term welc (a: set) (A: class): wff;
notation cab (x: set) (ph: wff x) := << { x | ph } >>;
{
local infix welc: e. 50;
-- welc respects set equality; membership in an abstraction unfolds to ph.
axiom ax-8b: << a = b -> a e. A -> b e. A >>
(wi (weq a b) (wi (welc a A) (welc b A)));
axiom ax-clab: << x e. {x | ph} <-> ph >>
(wb (welc x (cab x ph)) ph);
-- Class equality, by extensionality.
-- Fix: wal takes a binder and a single body, so the iff
-- << x e. A <-> x e. B >> must be wrapped in wb; the draft passed three
-- arguments to wal (in both the definiens and df-ceq).
def wceq (A B: class): wff :=
<< A. x (x e. A <-> x e. B) >>
(wal x (wb (welc x A) (welc x B))) {
infix wceq: = 50;
theorem df-ceq: << A = B <-> A. x (x e. A <-> x e. B) >>
(wb (wceq A B) (wal x (wb (welc x A) (welc x B))));
}
-- Embed sets into classes: cv a is the class whose members are the
-- elements of a. The coercion lets a set be written where a class is
-- expected (as in << a e. b >> below).
def cv (a: set): class := << {x | wel x a} >>
(cab x (wel x a)) {
coercion cv: set -> class;
theorem df-cv: << a e. b <-> wel a b >>
(wb (welc a (cv b)) (wel a b));
}
}
-- Membership between classes.
-- Fix: wex takes a binder and a single body, so the conjunction
-- << x = A /\ welc x B >> must be wrapped in wa; the draft passed three
-- arguments to wex (in both the definiens and df-cel).
def wcel (A B: class): wff :=
<< E. x (x = A /\ welc x B) >>
(wex x (wa (wceq (cv x) A) (welc x B))) {
infix wcel: e. 50;
theorem df-cel: << A e. B <-> E. x (x = A /\ welc x B) >>
(wb (wcel A B) (wex x (wa (wceq (cv x) A) (welc x B))));
}
I'm experimenting with a new language, Metamath Zero, an amalgamation of Metamath, Ghilbert and Lean syntax. The goal is to balance human readability and ease of writing a verifier, while also making some attempts to patch the bugs in metamath soundness (not that it's wrong, but there is a lot of soundness justification currently happening outside the metamath verifier kernel). Without giving too much explanation of what the syntax means (it's all fictional syntax at the moment, there is no parser yet), I would like to use some of you as guinea pigs for the "readability" aspect. How obvious is it what things mean? I will put some more explanatory comments at the end.

nonempty sort wff;
var ph ps ch: wff*;
term wi (ph ps: wff): wff; infixr wi: -> 25;
term wn (ph: wff): wff; prefix wn: ~ 40;
axiom ax-1: << ph -> ps -> ph >>
(wi ph (wi ps ph));
axiom ax-2: << (ph -> ps -> ch) -> (ph -> ps) -> ph -> ch >>
(wi (wi ph (wi ps ch)) (wi (wi ph ps) (wi ph ch)));
axiom ax-3: << (~ph -> ~ps) -> ps -> ph >>
(wi (wi (wn ph) (wn ps)) (wi ps ph));
axiom ax-mp:
<< ph >> ph ->
<< ph -> ps >> (wi ph ps) ->
<< ps >> ps;
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To post to this group, send email to meta...@googlegroups.com.
Visit this group at https://groups.google.com/group/metamath.
For more options, visit https://groups.google.com/d/optout.
nonempty sort wff;
var ph ps ch: wff*;
term wi (ph ps: wff): wff; infixr wi: -> 25;
What does the number say? Is it a precedence weight? Is the right-associativity hinted somewhere?
term wn (ph: wff): wff; prefix wn: ~ 40;
axiom ax-1: << ph -> ps -> ph >>
(wi ph (wi ps ph));
Is the line above an interpretation of the human readable infix, right-associative implication chain within double-angle brackets? In UPN format?
How do you forbid quantification over classes. Does this happen
since it is a new sort (your nonempty sort class;)
Problem is, you might end up with something else than ZFC.
term wal (x: set) (ph: wff x): wff; term cab (x: set) (ph: wff x): class;
def wtru (bound p: wff): wff := << p <-> p >> (wb p p) {
notation wtru := << T. >>;
theorem df-tru: << T. <-> (ph <-> ph) >> (wb wtru (wb ph ph));
}
def weu (x: set) (ph: wff x) (bound y: wff): wff :=
<< E. y A. x (ph <-> x = y) >>
(wex y (wal x (wb ph (weq x y)))) {
prefix wex: E! 30;
theorem df-eu: << E! x ph <-> E. y A. x (ph <-> x = y) >>
(wb (weu x ph) (wex y (wal x (wb ph (weq x y)))));
}
Here is a wish for a new system dubbed "Shullivan":
The ability to directly work with infix, etc.. no redundancy
with chevrons. I guess chevrons are only comments?
On Wed, 20 Feb 2019 06:32:39 -0500, Mario Carneiro <di....@gmail.com> wrote:
> I'm experimenting with a new language, Metamath Zero, an amalgamation of
> Metamath, Ghilbert and Lean syntax.
Syntax should be simple, though "simple" is in the eye of the beholder :-).
> var ph ps ch: wff*;
I don't see why you need a "*" here.
> term wi (ph ps: wff): wff; infixr wi: -> 25;
Why do you need to repeat "wi:" here?
I wonder if adding a "precedence: 25" would help, else the number is a mystery.
> axiom ax-1: << ph -> ps -> ph >>
> (wi ph (wi ps ph));
I see the s-expression below the axiom, but isn't that trivial to re-discover?
I think you should apply "Don't repeat yourself (DRY)", or a verifier will
have to recheck something that shouldn't need to be said at all.
> axiom ax-mp:
>
> << ph >> ph ->
> << ph -> ps >> (wi ph ps) ->
> << ps >> ps;
That's a lot of chevrons, and that seems much harder to follow
than metamath's current approach.
Axioms, in particular, are a big deal. It should be
"as obvious as possible" to the reader that they're correct.
I'm fine with s-expressions, but a lot of people DO NOT understand them.
If you make people input s-expressions, you might lose
some people entirely. In cases where the s-expression can be easily determined,
you shouldn't make a human create them.
You might consider using neoteric expressions instead of s-expressions.
They have the same meaning, but look more like math.
Basically "x(a b c)" means the same thing as "(x a b c)".
More details available here from Readable Lisp (my project):
https://readable.sourceforge.io/
That would convert this:
> (wn (wi (wi ph ps) (wn (wi ps ph))))
into this:
> wn(wi(wi(ph ps) wn(wi(ps ph))))
> theorem bi2: << (ph <-> ps) -> ps -> ph >>
> (wi (wb ph ps) (wi ps ph));
Again, I don't see why you need to expressly state the syntax proof.
It's noise for the human, and the computer can recalculate & check that trivially.
I see infixr. Are you planning to support arbitrary-length expressions of the
same operator, e.g., "( 1 + 2 + 3 + 4 )"?
> * The notation system turned out a bit more complicated than I'd hoped.
> Each "math expression" is stated twice, once in chevrons and once in
> s-expression notation. The intent is that the system actually uses the
> s-expressions to represent the axioms (so there is no parse tree funny
> business), and because they form part of the trust base they are displayed
> to the user. The notation in the chevrons is checked only in the reverse
> direction - the s-expression is printed and compared to the given string.
> (In an IDE there would be a mode where you only type the notation and the
> computer generates the s-expression.) There is still a danger of ambiguous
> parsing, but at least you can see the parse directly in the case of a
> mismatch, and the worst effect of a bad parse is that the theorem being
> proven is not what the user thought - the ambiguity can't be further
> exploited in the proof.
Wouldn't it be simpler to simply require that you cannot add a new
axiom or definition if it results in a syntactic ambiguity?
I *think* that would get rid of the "funny business" while eliminating duplicate
declarations. But that depends on what "funny business" you mean,
and I'm not sure I know what you're referring to.
> You might wonder where the proofs are. They go in a separate file, not
> human readable. The idea is that this file contains all the information
> that is necessary to appraise the content and correctness of a theorem, and
> the proof file is just directions to the verifier to produce a proof of the
> claimed theorems.
I think it's important that the proofs include the specific step-by-step instructions
as metamath does now - that makes it trivial to verify by humans and computers.
If it *also* has higher-level instructions (say, to try to reconstruct a proof if some
definitions have changed) that's fine.
On Wed, Feb 20, 2019 at 8:32 AM Mario Carneiro <di....@gmail.com> wrote:
I'm experimenting with a new language, Metamath Zero, an amalgamation of Metamath, Ghilbert and Lean syntax. The goal is to balance human readability and ease of writing a verifier, while also making some attempts to patch the bugs in metamath soundness (not that it's wrong, but there is a lot of soundness justification currently happening outside the metamath verifier kernel). Without giving too much explanation of what the syntax means (it's all fictional syntax at the moment, there is no parser yet), I would like to use some of you as guinea pigs for the "readability" aspect. How obvious is it what things mean? I will put some more explanatory comments at the end.

nonempty sort wff;
Easy to write a verifier: Stop right here.
A large number of keywords will make any verifier complex. Think about how to be able to declare syntax, in the language itself. This will leave the compiler "soft", but will also allow expansions not to make the collection invalid. Look at proofs in HOL that are no longer possible to run today.
In a language I started, almost all statements started with "def" or "var". This allowed for having zillions of keywords within def, but which were not reserved words in the language itself.
In basic definitions, verbose:

typecode wff ::nonempty;
var ph ps ch: wff*;
The : is also a keyword from what I understand. Style suggestion. Use var a b c :wff instead of var a b c: wff. That is, leave the : more attached to the type than to one of the variables.
By the way, only one type per variable? Think about adding something like partial types, or to use concepts of statically typed languages, declaring interfaces (::)?
term wi (ph ps: wff): wff; infixr wi: -> 25;
term wn (ph: wff): wff; prefix wn: ~ 40;
Here I suggest being as verbose as possible, more syntactic than logical:

syntax wi ( ph :wff "->" ps :wff) :wff infix.right 25;

Alternatively:

syntax wi :wff ( ph :wff "->" ps :wff) infix.right 25;

That is, types closest to the name, definition inside the parentheses, operator defined within quotes (escaped by \)
Operators like this, in quotes, allow a bit of language abuse, but also allow you to create things like writing Latex-style operations: \dot
The advantage of having a separate command for prefix/infix/suffix precedence is to tweak the priorities in advance. But it is asking for confusion. I will not suggest taking the command, but at least have the option to put priority in the syntax command itself to encourage it to stay in one place.
axiom ax-1: << ph -> ps -> ph >>
(wi ph (wi ps ph));
Here a weird suggestion, but... Axioms -> syntax. Then use the command, much like...
syntax ax-1 : "ph -> ps -> ph" :: (wi ph (wi ps ph)) ;Here : and :: are reused, with space to the right (without a type), only to denote clear points for the parser.
Or if you find overloading the syntax ugly (and it is), the suggestion above about "def":def.syntax wi :wff ( ph :wff "->" ps :wff) infix.right 25;def.axiom ax-1 : "ph -> ps -> ph" :: (wi ph (wi ps ph)) ;
axiom ax-2: << (ph -> ps -> ch) -> (ph -> ps) -> ph -> ch >>
(wi (wi ph (wi ps ch)) (wi (wi ph ps) (wi ph ch)));
axiom ax-3: << (~ph -> ~ps) -> ps -> ph >>
(wi (wi (wn ph) (wn ps)) (wi ps ph));
axiom ax-mp:
<< ph >> ph ->
<< ph -> ps >> (wi ph ps) ->
<< ps >> ps;
theorem a1i: << ph >> ph -> << ps -> ph >> (wi ps ph);
def.theorem a1i: << ph >> ph -> << ps -> ph >> (wi ps ph);That is, def is effectively a reserved word, and therefore can be specialized via .special. There will be def without specialization for everything else.
Other than that, general suggestions.
- The notion of local blocks, delimited by {} is good, but will probably clash with other uses. When building the parser pay attention to this. Starting everything by "def" helps mitigate this problem.
- The notion of separate and "solid" files is very important. I strongly suggest putting this in the language from the first version. A simple:file "path/name";in the solid file indicates where or to where the next lines go in case of split, and those lines, and a "master" file, containing only:file.include "path/name";is produced when split, allowing reconstruction.
- Instead of using << and >>, which certainly already has use, I suggest separating parts of syntax by <? and ?>, common from XML.
- In syntax, even within <? and ?>, everything that is expected to be fixed must be enclosed in quotation marks, and anything outside the quotation marks has some programmatic meaning.def.syntax wi :wff <? "(" ph :wff "->" ps :wff ")" ?> infix.right 25 ;def.axiom ax-1 <? ph -> ps -> ph ?> (wi ph (wi ps ph)) ;Which is more verbose than before, but leaves the parser simultaneously simpler and more flexible. In the first case the compiler knows that it is defining a thing, given by the quotation marks. In the second case everything is expected, and should come from the construction by s-expressions.
Looking at this now... (wi ph (wi ps ph)) will create a lot of "(" and ")" that does not exist in <? ph -> ps -> ph ?>. You realize that, I guess.
On Wed, 20 Feb 2019 06:32:39 -0500, Mario Carneiro <di....@gmail.com> wrote:
> I'm experimenting with a new language, Metamath Zero, an amalgamation of
> Metamath, Ghilbert and Lean syntax. The goal is to balance human
> readability and ease of writing a verifier, while also making some attempts
> to patch the bugs in metamath soundness (not that it's wrong, but there is
> a lot of soundness justification currently happening outside the metamath
> verifier kernel).
If possible, I think it'd be good to have an additional reason to create a new language,
or at least argue why similar results can't be done with tweaks to
existing mechanisms like $j.
1. Putting more soundness justifications into the underlying verifier
is indeed a worthy goal. But most of set.mm is proofs, not definitions,
and there are very few "axioms" in the traditional sense... so it's not clear
that all the verifiers need to check that so much.
I can also imagine tweaking existing systems instead of creating a new language.
2. Making the input "more readable" is worthwhile, but I'm not sure that
by *itself* this is enough. Making it so that we can be more flexible
about parentheses would *certainly* be a nice improvement.
Supporting ph /\ ps /\ ch /\ ... would be nice as well.
Expanding on #2: If it can manage to easily support arbitrary-length arguments like
ph /\ ps /\ ch /\ th /\ ... then I can suddenly see that it's able to do something
that Metamath today can't. Is that easy? Worth it?
I don't know, but that *would* go beyond our current setup.
No matter what, I recommend that you do NOT depend
too much on precedence, even when available.
"Developer beliefs about binary operator precedence (part 1)” by Derek M. Jones
http://www.knosof.co.uk/cbook/accu06.html
"a survey of developers at the 2006 ACCU conference to determine if they correctly applied the precedence and associativity of the binary operators common to C, C++, C#, Java, Perl, and PHP. In this experiment only 66.7% of the answers were correct (standard deviation 8.8, poorest performer 45.2% correct, best performer 80.5% correct); this was not much better than random chance (50%). Even many widely-used operator pairs had relatively poor results; only 94% of the problems were correctly solved when they combined * and +, 92% when they combined * and -, and 69% when they combined / and +."
Professional mathematicians probably do get precedence correct, but where possible I'd like the results to be understood by others.
When we developed my "Readable Lisp" we supported infix but decided to NOT support precedence. The trade-offs are very different here, so I don't think that reasoning really applies here. But my point is that supporting lots of precedence levels isn't really, by itself, a big win.
> * "def" declares a definition. Different from both Ghilbert and Metamath,
> definitions here are "limited lifetime unfolded". Each def has a block
> after it containing one or more theorems about the definition. These
> theorems are proven where the definition is replaced by the definiendum,
> and once the block is closed these theorems are the only access to the
> definition. (This enables some information privacy if desired.) See wb for
> example, which uses characterizing theorems instead of an iff.
That makes sense. However, in some cases you will want a way to say
"don't use this theorem" so you can prove statements that you don't use.
But what about proving theorems that have to "reach into" multiple definitions to work?
You might need a way to express that you're intentionally breaking the rules.
> * The notation system turned out a bit more complicated than I'd hoped.
> Each "math expression" is stated twice, once in chevrons and once in
> s-expression notation.
As I noted earlier, stating things twice is undesirable,
and I don't think it's necessary in this case.
On Wed, 20 Feb 2019 22:47:43 -0500, Mario Carneiro <di....@gmail.com> wrote:
> Translating from metamath to mm0 is a bit trickier, because some annotation
> information in the mm0 file is not present in mm files. If you skip some of
> the foundational material (roughly the contents of this file), translating
> a set.mm-like file should be straightforward and automatable.
It's always possible to slip in some info in comments if all are agreeable.
It's how $j statements work now.
> I didn't demonstrate this in the file, but the chevrons are optional. So
> you could just write:
>
> axiom ax-mp: ph -> (wi ph ps) -> ps;
That switch back-and-forth is rather confusing for readability.
> and it would be understood just as well. In such a small axiom as this that
> might be a reasonable thing to do (and in particular for single wffs the <<
> ph >> ph business just looks silly). However the s-expressions are NOT
> optional. This is one of the things that would be omitted in the "less
> verbose" version of the file - you might just write
>
> axiom ax-mp: << ph >> -> << ph -> ps >> -> << ps >>;
>
> and the compiler, which has a proper parser, would produce the
> s-expressions for you.
Notationally this seems much worse than current Metamath:
min $e |- ph $.
maj $e |- ( ph -> ps ) $.
ax-mp $a |- ps $.
In current Metamath I have a name for every part (so I can refer to each one),
I can see the syntax directly, and I don't have a festoon of << ... >> everywhere
that makes it hard to see what's acting. Chevrons are TERRIBLE
when there are arrows, they look too much alike.
Also, don't you want -> to mean implication? It seems to be used in two different ways here.
> The problem with metamath's current approach is that it's subtly unsafe, as
> we saw recently with the example of syntax ambiguity causing proofs of
> false. Metamath treats statements as lists of symbols, which means that you
> can do some shenanigans with ambiguity if the grammar supports it. To
> prevent this, the verifier here works entirely with trees (encoded as
> s-expressions). So the worst that can happen is that a theorem doesn't say
> what you think, it can't be exploited in the middle of a proof.
I *agree* with you that processing-as-trees is the better approach.
That drops MIU, but I think supporting MIU isn't really necessary.
I disagree that the s-expressions needs to be exposed to the poor hapless user.
The verifier can trivially figure out what -> refers to; users should not
need to look up "wi" unless they're debugging a verifier.
Re: The shenanighans have already been dealt with: demand an unambiguous
parse by some criteria. If you can't show it's unambiguous, then it's not okay.
I think that'd be reasonable here too.
> > theorem bi2: << (ph <-> ps) -> ps -> ph >>
> > > (wi (wb ph ps) (wi ps ph));
> >
> > Again, I don't see why you need to expressly state the syntax proof.
> > It's noise for the human, and the computer can recalculate & check that
> > trivially.
> >
>
> Not if the verifier doesn't have a parser! I'm worried that if I add a
> parser this will be a large fraction of the code, and it will be a large
> moving part that could contribute to uncertainty that the program has no
> bugs. (There will be a parser of course, for the code syntax itself, but it
> will be statically known so you can write it in a few functions.)
I agree that you don't want an overly *complicated* parser.
But I think you're trading away too much. If your goal is readability,
then "(wi ph ps)" is *terrible* compared to "ph -> ps".
Metamath maximizes simple parsing, and it manages to support ph -> ps.
I don't think you need to go that far backwards.
A relatively simple tree parser should work just fine.
> > Wouldn't it be simpler to simply require that you cannot add a new
> > axiom or definition if it results in a syntactic ambiguity?
> > I *think* that would get rid of the "funny business" while eliminating
> > duplicate
> > declarations. But that depends on what "funny business" you mean,
> > and I'm not sure I know what you're referring to.
> >
>
> How will you know if new notation (not axiom/definition) results in a
> syntax ambiguity? The general problem is undecidable. What I want to avoid
> is a gnarly grammar with a complicated ambiguity being exploited somewhere
> deep in the proof (which isn't even being displayed) and causing something
> to be provable that shouldn't be.
If you are trying to solve the general problem you're trying too hard :-).
If you use a traditional LR parser, then make a table, and if you have shift/shift or
shift/reduce ambiguities given N tokens (whatever N is) and the precedence rules,
it's ambiguous. Done. You don't need - or want - infinite lookahead.
Parsing is well-studied.
Most languages have fixed operations instead of adding operations as they go.
I've not tried to parse language where it adds operators with precedence values,
so I don't know how difficult it is. Prolog does it, though, so I suspect it's not too hard.
Another approach would be to put all the syntax definitions in a separate file,
and use that to generate the parser to be used later.
That can be fast; most of the time the syntax definitions won't change, so you can
cache the results & regenerate the table/code only when the definitions change.
That would be an entirely conventional way to implement a parser, so there
are lots of tools you can use (which would reduce the code to be written).
Even easier: just drop precedence entirely. You don't need it;
people often add "unnecessary" parentheses for clarity.
If your goal is simplicity, that would be simple.
Implementing precedence is a lot of work for a mechanism that is
rarely used in practice. At the least, you could start *without* precedence,
and then add it later if desired.
On Wed, 20 Feb 2019 23:16:33 -0500, Mario Carneiro <di....@gmail.com> wrote:
> Everyone knows that + is lower precedence than *
Well, no.
You & I know that. But only 94% of professional programmers with 5+ years
got the correct answer when surveyed at a conference.
And random chance gives you 50%.
> If you have a solution to:
>
> * Simple static parser
> * Readable math
> * Guaranteed unambiguity
> * Minimal parsing "surprises"
Well, let's start with an easy approach & see if it works.
LR parsers are extensively studied & implemented, let's start there.
* Simple: If you make syntax a separate file, you can generate a simple static parser.
* Readable math: Most math systems use LR or LL parsers, so you're no worse off than most.
* Guaranteed unambiguity: They *require* unambiguity or a tie-breaking rule - so if you say that
only precedence rules can break ties, that gives you unambiguity. Basically treat a shift/shift or a shift/reduce conflict as an error if precedence doesn't break it.
* Minimal parsing "surprises": These are very widely used, so they shouldn't surprise anyone.
You could make k=5 (that is, LR(5)).
I'm guessing you believe that's not enough. Can you help me understand why?
Indeed, dealing with modularization seems pretty important and apparently caused problems for Raph, so perhaps I should give it more thought.

In a specification file such as this, we don't have to worry about proof dependencies, but we do have to be able to import axiom systems and syntax from another file. But now I'm worried about the possible security vulnerabilities this causes. A specification file is supposed to be self-contained in the sense that if you know the language then it says what it means. If a specification file imports another, then the meaning of the first file depends on the meaning of that other file as well. It's even worse if the path is ambiguous in some way, and we don't see exactly what it's referring to. If the includes are relative to a path provided at the command line, then it's not even definite what file is being referred to.

One way to make sure the import is right is to reproduce the axioms and theorems that are expected to be imported. This way the verifier can double check that all the files are stitched together correctly. But that may well be the whole file or nearly so (theorems don't need to be imported, but axioms and definitions do). And that's a hell of a lot of boilerplate, and also obscures the actual content of the file if it starts with a lengthy recap section.

The specification file / proof file dichotomy is supposed to represent the distinction between *what* is being proven and *how* it should be proven. The proof file is a fine place for putting in instructions for how to find other files, but the specification files should be more focused on content and cross checks.

I will return to this topic in a bit with a more concrete syntax proposal.
I can relate to "make the metamath parser simple, unambiguous and efficient"
I have just written an Antlr4 grammar for an old version of
metamath. But the performance was so bad (it took minutes to parse
4000 set.mm statements, with some statements like
|- ( ( ( ( ( ∨1c e. RR* /\\ ∨2c e. RR* ) /\\ -. ( ∨1c = 0 \\/ ∨2c = 0 ) ) /\\ -. ( ( ( 0 < ∨2c /\\ ∨1c = +oo ) \\/ ( ∨2c < 0 /\\ ∨1c = -oo ) ) \\/ ( ( 0 < ∨1c /\\ ∨2c = +oo ) \\/ ( ∨1c < 0 /\\ ∨2c = -oo ) ) ) ) /\\ -. ( ( ( 0 < ∨2c /\\ ∨1c = -oo ) \\/ ( ∨2c < 0 /\\ ∨1c = +oo ) ) \\/ ( ( 0 < ∨1c /\\ ∨2c = -oo ) \\/ ( ∨1c < 0 /\\ ∨2c = +oo ) ) ) ) -> ∨1c e. RR )"
that took like 9s to parse, I did not try to optimize it though)... the parser failed epically, by throwing an out of stack space exception (too much garbage to collect)
I had another go with the same parser BUT with doing a first pass
of the statement finding opening and closing braces,
so that I could parse subgroups and rebuild the tree with the
result
It worked a lot better (like 30x) : parsing set.mm in 10s ?
I would welcome being able to write a simple parser without using
a tool like antlr4 (that only has a few language targets and not
Kotlin, my favorite language) but it would be nice to be able to
find groups that can be parsed independently in the metamath
language as it obviously speeds things up 30x, and makes it also
possible to use many threads to parse 1 sentence (another speed up
possible there)
Yet, the parser that I wrote isn't a dynamic parser and I will
have to look into/implement that if I want the maths it can handle
to be extanded.
(At one point, I'll have to implement the same wikipedia
algorithm)
So, what I am trying to say is :
a) groups (stuff with easily found bounds like () ) are nice to
have whereas rules like (wiso statement)
c Isom c COMMA c LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="wiso";}
make things more difficult
things with prefix and clean separation are nice, things with
adjacency are complicated :/
b) if it is easy to make things computer-understandable (build a tree), it might easy (or not :/)to turn those things into human-friendly stuff (display, write and interact with it nicely) with another software layer that isn't in the language
c) writing things twice should be a big no no for any programmer and mathematician
d) even if you write things twice in your database, you'll still
have to build a parser to handle human input anyway :)
e) maybee there are other solutions ? like use colored spaces to
separate tokens and handle nesting ?
I have been playing 8 months with metamath, It took me a lot of
time to adapt and I am not sure the new language is better at
readability, it hurt the first time I saw it (But I still love you
Mario, I'm a great fan, have been using your stuff all over the
place)
f) I have been doing university level maths for 20 years and
software development the last 6 years and the only precedence
order I know is about +-*/^(). So...
g) I would love to have metamath look more like the maths I write
on the blackboard (those braces !) but I would rather have a
language that is understood by a computer and easy to work with
Cheers,
Olivier Binda
grammar Parser2; statement : provenStatement | wffStatement | classStatement | setStatement; setClassOrWff : s | w | c ; setStatement : Set s; classStatement : Class c; provenStatement : Provable w; wffStatement : Wff w; c : classHolder | s | cC | cC2 | c0 | cC3 | cC7 | cSC2 | cS3W | cSCSC2 | cC4 | cC5 | cSCW | cSC3 | cC8 | cCSC | cSW | cC3S | cWC2 | cS2W | cC2S | cS | cC6 ; w : wffHolder | wW | wW2 | wSW | wC2 | wSC2 | wC | wW12 | w0 | wW3 | wS2 | wW4 | wW6 | wW9 | wSCW | wS2W | wW8 | wSC | wCSW | wC3 | wW5 | wW10 | wW11 | wC5 | wW7 ; s : setHolder ; wW locals[String op] : HYPHEN_MINUS_FULL_STOP w {$op="wn";} | LESS_THAN_SIGN_GREATER_THAN_SIGN w {$op="wdia";} | LEFT_SQUARE_BRACKET_FULL_STOP_RIGHT_SQUARE_BRACKET w {$op="wbox";} | LEFT_PARENTHESIS_RIGHT_PARENTHESIS w {$op="wcirc";} ; wW2 locals[String op] : LEFT_PARENTHESIS w LESS_THAN_SIGN_HYPHEN_MINUS_GREATER_THAN_SIGN w RIGHT_PARENTHESIS {$op="wb";} | LEFT_PARENTHESIS w HYPHEN_MINUS_SOLIDUS_REVERSE_SOLIDUS w RIGHT_PARENTHESIS {$op="wnan";} | LEFT_PARENTHESIS w HYPHEN_MINUS_GREATER_THAN_SIGN w RIGHT_PARENTHESIS {$op="wi";} | LEFT_PARENTHESIS w SOLIDUS_REVERSE_SOLIDUS w RIGHT_PARENTHESIS {$op="wa";} | LEFT_PARENTHESIS w Until w RIGHT_PARENTHESIS {$op="wunt";} | LEFT_PARENTHESIS_FULL_STOP w HYPHEN_MINUS_GREATER_THAN_SIGN_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvd1";} | LEFT_PARENTHESIS w REVERSE_SOLIDUS_SOLIDUS w RIGHT_PARENTHESIS {$op="wo";} | LEFT_PARENTHESIS w REVERSE_SOLIDUS_SOLIDUS_LOW_LINE w RIGHT_PARENTHESIS {$op="wxo";} | LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc2";} ; wSW locals[String op] : A_FULL_STOP s w {$op="wal";} | E_EXCLAMATION_MARK s w {$op="weu";} | E_ASTERISK s w {$op="wmo";} | F_SOLIDUS s w {$op="wnf";} | E_FULL_STOP s w {$op="wex";} ; wC2 locals[String op] : c EQUALS_SIGN_SOLIDUS_EQUALS_SIGN c {$op="wne";} | c ESOLIDUS c {$op="wnel";} | c Se c {$op="wse";} | c DefAt c {$op="wdfat";} | c C_FULL_STOP c {$op="wpss";} | c LOW_LINESe c 
{$op="w-bnj13";} | c Po c {$op="wpo";} | c Fn c {$op="wfn";} | c Or c {$op="wor";} | c Er c {$op="wer";} | c EQUALS_SIGN c {$op="wceq";} | c EFULL_STOP c {$op="wcel";} | c C_LOW_LINE c {$op="wss";} | c Fr c {$op="wfr";} | c We c {$op="wwe";} | c LOW_LINEFrSe c {$op="w-bnj15";} ; cC locals[String op] : LEFT_CURLY_BRACKET c RIGHT_CURLY_BRACKET {$op="csn";} | Suc c {$op="csuc";} | OR c {$op="cofr";} | ORVC c {$op="corvc";} | FullFun c {$op="cfullfn";} | Dom c {$op="cdm";} | OF c {$op="cof";} | HYPHEN_MINUSu c {$op="cneg";} | TILDEP c {$op="cpw";} | HYPHEN_MINUS_FULL_STOPg c {$op="cgon";} | Image c {$op="cimage";} | Funpart c {$op="cfunpart";} | Tpos c {$op="ctpos";} | Curry c {$op="ccur";} | Uncurry c {$op="cunc";} | AC_LOW_LINE c {$op="wacn";} | Locally c {$op="clly";} | U_FULL_STOP c {$op="cuni";} | VERTICAL_LINE_CIRCUMFLEX_ACCENT_VERTICAL_LINE c {$op="cint";} | HYPHEN_MINUSe c {$op="cxne";} | LESS_THAN_SIGN_QUOTATION_MARK c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs1";} | N_HYPHEN_MINUSLocally c {$op="cnlly";} | OFC c {$op="cofc";} | Word c {$op="cword";} | Slot c {$op="cslot";} | Fix c {$op="cfix";} | GRAVE_ACCENT_APOSTROPHE c {$op="ccnv";} | Ran c {$op="crn";} | Recs LEFT_PARENTHESIS c RIGHT_PARENTHESIS {$op="crecs";} ; wSC2 locals[String op] : DisjLOW_LINE s EFULL_STOP c c {$op="wdisj";} ; cC2 locals[String op] : LEFT_PARENTHESIS c X_FULL_STOP c RIGHT_PARENTHESIS {$op="cxp";} | LEFT_CURLY_BRACKET c COMMA c RIGHT_CURLY_BRACKET {$op="cpr";} | LEFT_PARENTHESIS c GRAVE_ACCENT c RIGHT_PARENTHESIS {$op="cfv";} | Pprod LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="cpprod";} | GcdOLD LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="cgcdOLD";} | LEFT_PARENTHESIS c OFULL_STOP c RIGHT_PARENTHESIS {$op="ccom";} | A_FULL_STOPg c c {$op="cgol";} | LESS_THAN_SIGN_FULL_STOP c COMMA c GREATER_THAN_SIGN_FULL_STOP {$op="cop";} | Seqom LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="cseqom";} | LOW_LINE c c {$op="cdp2";} | LEFT_PARENTHESIS c ICIRCUMFLEX_ACCENTi c 
RIGHT_PARENTHESIS {$op="cin";} | LEFT_PARENTHESIS c QUOTATION_MARK c RIGHT_PARENTHESIS {$op="cima";} | LESS_THAN_SIGN_LESS_THAN_SIGN c COMMA c GREATER_THAN_SIGN_GREATER_THAN_SIGN {$op="caltop";} | LEFT_PARENTHESIS c XX_FULL_STOP c RIGHT_PARENTHESIS {$op="caltxp";} | LESS_THAN_SIGN_QUOTATION_MARK c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs2";} | PtDf LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="cptdfc";} | Rec LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="crdg";} | LEFT_SQUARE_BRACKET c RIGHT_SQUARE_BRACKET c {$op="cec";} | LEFT_PARENTHESIS c SOLIDUS_FULL_STOP c RIGHT_PARENTHESIS {$op="cqs";} | LEFT_PARENTHESIS c LEFT_PARENTHESIS_PLUS_SIGN_PLUS_SIGN_RIGHT_PARENTHESIS c RIGHT_PARENTHESIS {$op="csymdif";} | LEFT_PARENTHESIS c REVERSE_SOLIDUS c RIGHT_PARENTHESIS {$op="cdif";} | LEFT_PARENTHESIS c UFULL_STOP c RIGHT_PARENTHESIS {$op="cun";} | LEFT_PARENTHESIS c VERTICAL_LINE_GRAVE_ACCENT c RIGHT_PARENTHESIS {$op="cres";} | OrdIso LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="coi";} | SEMICOLON c c {$op="cdc";} | E_FULL_STOPg c c {$op="cgox";} | LEFT_PARENTHESIS c LEFT_PARENTHESISxRIGHT_PARENTHESIS c RIGHT_PARENTHESIS {$op="ctxp";} | LEFT_PARENTHESIS c APOSTROPHE_APOSTROPHE_APOSTROPHE c RIGHT_PARENTHESIS {$op="cafv";} ; wC locals[String op] : Rel c {$op="wrel";} | Smo c {$op="wsmo";} | Ord c {$op="word";} | Lim c {$op="wlim";} | Tr c {$op="wtr";} | Fun c {$op="wfun";} | Prt c {$op="wprt";} ; c0 locals[String op] : Fi {$op="cfi";} | Card {$op="ccrd";} | Q_FULL_STOP {$op="cnq";} | DIGIT_ONER {$op="c1r";} | PLUS_SIGNR {$op="cplr";} | CC {$op="cc";} | LESS_THAN_SIGN {$op="clt";} | Splice {$op="csplice";} | Reverse {$op="creverse";} | Gcd {$op="cgcd";} | OdZ {$op="codz";} | Struct {$op="cstr";} | C_omp {$op="cco";} | DIGIT_ZEROg {$op="c0g";} | XsFULL_STOP {$op="cxps";} | Iso {$op="ciso";} | HomA {$op="choma";} | SetCat {$op="csetc";} | Dirset {$op="cdrs";} | Toset {$op="ctos";} | Lat {$op="clat";} | CLat {$op="ccla";} | ODual {$op="codu";} | TosetRel 
{$op="ctsr";} | Tail {$op="ctail";} | GrpIso {$op="cgim";} | Cntr {$op="ccntr";} | OppG {$op="coppg";} | VarFGrp {$op="cvrgp";} | DProd {$op="cdprd";} | MulGrp {$op="cmgp";} | RingIso {$op="crs";} | PID {$op="cpid";} | Eval {$op="cevl";} | EvalSubDIGIT_ONE {$op="ces1";} | Proj {$op="cpj";} | TopBases {$op="ctb";} | TILDE_TILDE_GREATER_THAN_SIGNt {$op="clm";} | UFil {$op="cufil";} | TopVec {$op="ctvc";} | ASTERISKMetSp {$op="cxme";} | OmN {$op="comn";} | Cau {$op="cca";} | CMetSp {$op="ccms";} | Coeff {$op="ccoe";} | Area {$op="carea";} | Lam {$op="cvma";} | SOLIDUSL {$op="clgs";} | TPLUS_SIGN {$op="ctcl";} | GId {$op="cgi";} | CIRCUMFLEX_ACCENTg {$op="cgx";} | ExId {$op="cexid";} | CVecOLD {$op="cvc";} | FULL_STOPsOLD {$op="cns";} | SubSp {$op="css";} | TILDEH {$op="chil";} | Cauchy {$op="ccau";} | PLUS_SIGNop {$op="chos";} | FULL_STOPfn {$op="chft";} | LinOp {$op="clo";} | Bra {$op="cbr";} | LESS_THAN_SIGN_LOW_LINEop {$op="cleo";} | SigAlgebra {$op="csiga";} | Retr {$op="cretr";} | HYPHEN_MINUS_GREATER_THAN_SIGNg {$op="cgoi";} | Qp {$op="cqp";} | No {$op="csur";} | Bigcup {$op="cbigcup";} | Btwn {$op="cbtwn";} | TransportTo {$op="ctransport";} | InnerFiveSeg {$op="cifs";} | Line {$op="cline2";} | PresetRel {$op="cpresetrel";} | Ub {$op="cub";} | Lb {$op="clb";} | DIGIT_ZEROcv {$op="c0cv";} | ProdObj {$op="cprodo";} | CodSetCat {$op="ccodcase";} | SetCatOLD {$op="ccaset";} | LOW_LINE_VERTICAL_LINE_LOW_LINEc {$op="cfals";} | Bic {$op="cbic";} | Derv {$op="cderv";} | Halfplane {$op="chalfp";} | Segtrg {$op="csegtrg";} | Neug {$op="cneug";} | Rn {$op="crrn";} | PrRing {$op="cprrng";} | RmY {$op="crmy";} | LineDIGIT_THREE {$op="cline3";} | Trails {$op="ctrail";} | Circuits {$op="ccrct";} | LogLOW_LINE {$op="clog_";} | CvLat {$op="clc";} | Trn {$op="ctrnN";} | EDRingR {$op="cedring-rN";} | Mapd {$op="cmpd";} | DIGIT_TWOo {$op="c2o";} | DIGIT_FOURo {$op="c4o";} | CIRCUMFLEX_ACCENTo {$op="coe";} | TILDE_TILDE {$op="cen";} | TILDE_LESS_THAN_SIGN_LOW_LINE {$op="cdom";} | 
TILDE_LESS_THAN_SIGN {$op="csdm";} | Har {$op="char";} | TC {$op="ctc";} | FULL_STOPpQ {$op="cmpq";} | SOLIDUSQ {$op="cerq";} | FULL_STOPQ {$op="cmq";} | LESS_THAN_SIGNQ {$op="cltq";} | FULL_STOPpR {$op="cmpr";} | DIGIT_ONE {$op="c1";} | HYPHEN_MINUSoo {$op="cmnf";} | DIGIT_TWO {$op="c2";} | DIGIT_SIX {$op="c6";} | DIGIT_NINE {$op="c9";} | LEFT_SQUARE_BRACKET_COMMA_RIGHT_PARENTHESIS {$op="cico";} | FULL_STOP_FULL_STOP_FULL_STOP {$op="cfz";} | Mod {$op="cmo";} | Concat {$op="cconcat";} | Substr {$op="csubstr";} | TILDE_TILDE_GREATER_THAN_SIGNr {$op="crli";} | O_LEFT_PARENTHESIS_DIGIT_ONE_RIGHT_PARENTHESIS {$op="co1";} | Exp {$op="ce";} | Sin {$op="csin";} | Cos {$op="ccos";} | Tan {$op="ctan";} | Sadd {$op="csad";} | Z_LEFT_SQUARE_BRACKETiRIGHT_SQUARE_BRACKET {$op="cgz";} | VERTICAL_LINE_GRAVE_ACCENTs {$op="cress";} | ASTERISKr {$op="cstv";} | FULL_STOPs {$op="cvsca";} | Dist {$op="cds";} | VERTICAL_LINE_GRAVE_ACCENTt {$op="crest";} | RR_ASTERISKs {$op="cxrs";} | QTop {$op="cqtop";} | MrCls {$op="cmrc";} | Id {$op="ccid";} | Comf {$op="ccomf";} | OppCat {$op="coppc";} | Inv {$op="cinv";} | Func {$op="cfunc";} | IdFunc {$op="cidfu";} | VERTICAL_LINE_GRAVE_ACCENTf {$op="cresf";} | FuncCat {$op="cfuc";} | Arrow {$op="carw";} | CompA {$op="ccoa";} | PairF {$op="cprf";} | Yon {$op="cyon";} | Preset {$op="cpreset";} | Meet {$op="cmee";} | ToInc {$op="cipo";} | PosetRel {$op="cps";} | Infw {$op="cinf";} | LatRel {$op="cla";} | LSSum {$op="clsm";} | FreeGrp {$op="cfrgp";} | CycGrp {$op="ccyg";} | DIGIT_ONEr {$op="cur";} | DivRing {$op="cdr";} | SubRing {$op="csubrg";} | RingSpan {$op="crgspn";} | LSpan {$op="clspn";} | RingLMod {$op="crglmod";} | MetOpen {$op="cmopn";} | ZMod {$op="czlm";} | Chr {$op="cchr";} | PreHil {$op="cphl";} | Hil {$op="chs";} | OBasis {$op="cobs";} | Cls {$op="ccl";} | CnP {$op="ccnp";} | KolDIGIT_TWO {$op="ct0";} | Nrm {$op="cnrm";} | KQ {$op="ckq";} | Homeo {$op="chmeo";} | TILDE_EQUALS_SIGN {$op="chmph";} | UFL {$op="cufl";} | FClusf 
{$op="cfcf";} | TopMnd {$op="ctmd";} | Tsums {$op="ctsu";} | TopDRing {$op="ctdrg";} | ToMetSp {$op="ctmt";} | Norm {$op="cnm";} | NGHom {$op="cnghm";} | HYPHEN_MINUScnHYPHEN_MINUS_GREATER_THAN_SIGN {$op="ccncf";} | ASTERISKp {$op="cpco";} | ToCHil {$op="ctch";} | Ban {$op="cbn";} | CHil {$op="chl";} | LimCC {$op="climc";} | DegDIGIT_ONE {$op="cdg1";} | AA {$op="caa";} | TILDE_TILDE_GREATER_THAN_SIGNu {$op="culm";} | Arctan {$op="catan";} | Plig {$op="cplig";} | GrpOp {$op="cgr";} | SOLIDUSg {$op="cgs";} | AbelOp {$op="cablo";} | SemiGrp {$op="csem";} | MndOp {$op="cmndo";} | RingOps {$op="crngo";} | IndMet {$op="cims";} | LnOp {$op="clno";} | CBan {$op="ccbn";} | PLUS_SIGNH {$op="cph";} | Normfn {$op="cnmf";} | Adjh {$op="cado";} | Eigval {$op="cel";} | CHStates {$op="chst";} | Measures {$op="cmeas";} | RRndVar {$op="crrv";} | LogLOW_LINEG {$op="clgam";} | PCon {$op="cpcon";} | UMGrph {$op="cumg";} | REVERSE_SOLIDUS_SOLIDUSg {$op="cgoo";} | LESS_THAN_SIGN_HYPHEN_MINUS_GREATER_THAN_SIGNg {$op="cgob";} | AxExt {$op="cgze";} | AxRep {$op="cgzr";} | AxUn {$op="cgzu";} | AxReg {$op="cgzg";} | SplitFldDIGIT_ONE {$op="csf1";} | SplitFld {$op="csf";} | PolySplitLim {$op="cpsl";} | ZRing {$op="czr";} | TASTERISKrec {$op="crtrcl";} | Limits {$op="climits";} | Singleton {$op="csingle";} | Apply {$op="capply";} | Cup {$op="ccup";} | Restrict {$op="crestrict";} | OuterFiveSeg {$op="cofs";} | CgrDIGIT_THREE {$op="ccgr3";} | FiveSeg {$op="cfs";} | CurDIGIT_ONE {$op="ccur1";} | AntiDir {$op="cantidir";} | BndLat {$op="clbl";} | CIRCUMFLEX_ACCENTmd {$op="clsg";} | ZeroDiv {$op="czerodiv";} | Action {$op="cact";} | Vec {$op="cvec";} | RAffSp {$op="craffsp";} | IsolatedPt {$op="cisopt";} | Dgra {$op="cmgra";} | Alg {$op="calg";} | IdLOW_LINE {$op="cid_";} | Ded {$op="cded";} | CinvOLD {$op="ccinv";} | FuncOLD {$op="cfuncOLD";} | InitObj {$op="ciobj";} | Natural {$op="cntrl";} | Tar {$op="ctar";} | DWords {$op="cdwords";} | Grammar {$op="cgrm";} | Prdct {$op="cgprdct";} | Phc 
{$op="cphc";} | PLines {$op="cplines";} | C_on {$op="ccon2";} | Segments {$op="cSeg";} | Angle {$op="cangle";} | Angc {$op="cangc";} | Cut {$op="ccut";} | LocFin {$op="clocfin";} | TotBnd {$op="ctotbnd";} | TILDE_EQUALS_SIGNr {$op="crisc";} | MzPolyCld {$op="cmzpcl";} | MzPoly {$op="cmzp";} | Dioph {$op="cdioph";} | LEFT_SQUARE_BRACKET_RIGHT_SQUARE_BRACKETNN {$op="csquarenn";} | LFinGen {$op="clfig";} | FreeLMod {$op="cfrlm";} | Monic {$op="cmnc";} | PmSgn {$op="cpsgn";} | CytP {$op="ccytp";} | TopSep {$op="ctopsep";} | Walks {$op="cwalk";} | SPaths {$op="cspath";} | PathOn {$op="cpthon";} | GREATER_THAN_SIGN {$op="cgt";} | Cosh {$op="ccosh";} | FULL_STOP {$op="cdp";} | Ceiling {$op="ccei";} | LSHyp {$op="clsh";} | LESS_THAN_SIGNo {$op="ccvr";} | HL {$op="chlt";} | LPlanes {$op="clpl";} | Lines {$op="clines";} | LTrn {$op="cltrn";} | TGrp {$op="ctgrp";} | VA {$op="cdjaN";} | DIsoH {$op="cdih";} | LCDual {$op="clcd";} | LEFT_SQUARE_BRACKETC_FULL_STOP_RIGHT_SQUARE_BRACKET {$op="crpss";} | Undef {$op="cund";} | DIGIT_THREEo {$op="c3o";} | PLUS_SIGNc {$op="ccda";} | LESS_THAN_SIGNN {$op="clti";} | DIGIT_ONEQ {$op="c1q";} | DIGIT_ZERO {$op="cc0";} | PLUS_SIGNoo {$op="cpnf";} | FULL_STOPi {$op="cip";} | MrInd {$op="cmri";} | ACS {$op="cacs";} | Nat {$op="cnat";} | XcFULL_STOP {$op="cxpc";} | UncurryF {$op="cuncf";} | Poset {$op="cpo";} | Lub {$op="club";} | Grp {$op="cgrp";} | HYPHEN_MINUSg {$op="csg";} | GrpAct {$op="cga";} | PGrp {$op="cpgp";} | DProj {$op="cdpj";} | CRing {$op="ccrg";} | AbsVal {$op="cabv";} | LMHom {$op="clmhm";} | LVec {$op="clvec";} | EvalSub {$op="ces";} | OrdPwSer {$op="copws";} | SelectVars {$op="cslv";} | Top {$op="ctop";} | Clsd {$op="ccld";} | Int {$op="cnt";} | TX {$op="ctx";} | TopGrp {$op="ctgp";} | NrmVec {$op="cnvc";} | DIGIT_ZEROp {$op="c0p";} | C_CIRCUMFLEX_ACCENTn {$op="ccpn";} | MDeg {$op="cmdg";} | Deg {$op="cdgr";} | Log {$op="clog";} | I_nv {$op="cgn";} | DivRingOps {$op="cdrng";} | ASTERISK_HYPHEN_MINUSFld {$op="csfld";} | 
ComDIGIT_TWO {$op="ccm2";} | HmOp {$op="chmo";} | DIGIT_ZEROh {$op="c0v";} | FULL_STOPop {$op="chot";} | LinFn {$op="clf";} | EulPaths {$op="ceup";} | VDeg {$op="cvdg";} | AxPow {$op="cgzp";} | HomLimB {$op="chlb";} | Prj {$op="cproj";} | CurDIGIT_TWO {$op="ccur2";} | LeR {$op="cse";} | ComDIGIT_ONE {$op="ccm1";} | SubSemiGrpGen {$op="csbsgrg";} | FreeSemiGrp {$op="cfsm";} | SubVec {$op="csvec";} | FULL_STOPm {$op="csmat";} | TermObj {$op="ctobj";} | IdSetCat {$op="cidcase";} | Words {$op="cwrd";} | Sym {$op="csym";} | SOLIDUS_REVERSE_SOLIDUSc {$op="cands";} | Psc {$op="clpsc";} | Ors {$op="cors";} | Prop {$op="cprop";} | PPoints {$op="cpoints";} | Coln {$op="ccol";} | Ibg {$op="cibg";} | Slices {$op="cslices";} | Ismty {$op="cismty";} | Dmn {$op="cdmn";} | NoeACS {$op="cnacs";} | LIndS {$op="clinds";} | MinPolyAA {$op="cmpaa";} | IntgOver {$op="citgo";} | MaDet {$op="cmdat";} | Neighbors {$op="cnbgra";} | Paths {$op="cpath";} | Sinh {$op="csinh";} | LFnl {$op="clfn";} | Pmap {$op="cpmap";} | LAut {$op="claut";} | EDRing {$op="cedring";} | DVecH {$op="cdvh";} | HGMap {$op="chg";} | LOW_LINEE {$op="cep";} | Om {$op="com";} | DIGIT_TWOnd {$op="c2nd";} | R_DIGIT_ONE {$op="cr1";} | Aleph {$op="cale";} | Cf {$op="ccf";} | WUni {$op="cwun";} | TarskiMap {$op="ctskm";} | PLUS_SIGNN {$op="cpli";} | LESS_THAN_SIGNP {$op="cltp";} | R_FULL_STOP {$op="cnr";} | DIGIT_ZEROR {$op="c0r";} | LESS_THAN_SIGNRR {$op="cltrr";} | SOLIDUS {$op="cdiv";} | DIGIT_SEVEN {$op="c7";} | DIGIT_EIGHT {$op="c8";} | EXCLAMATION_MARK {$op="cfa";} | Re {$op="cre";} | Pi {$op="cpi";} | VERTICAL_LINE_VERTICAL_LINE {$op="cdivides";} | Numer {$op="cnumer";} | PCnt {$op="cpc";} | MonoAP {$op="cvdwm";} | Scalar {$op="csca";} | TopSet {$op="cts";} | TopOpen {$op="ctopn";} | QUOTATION_MARKs {$op="cimas";} | Epi {$op="cepi";} | CatCat {$op="ccatc";} | DiagFunc {$op="cdiag";} | Glb {$op="cglb";} | Supw {$op="cspw";} | PLUS_SIGNf {$op="cplusf";} | PSyl {$op="cslw";} | SOLIDUSr {$op="cdvr";} | LIdeal 
{$op="clidl";} | MPoly {$op="cmpl";} | PwSerDIGIT_ONE {$op="cps1";} | PolyDIGIT_ONE {$op="cpl1";} | ToHL {$op="cthl";} | Nei {$op="cnei";} | KGen {$op="ckgen";} | FBas {$op="cfbas";} | FilGen {$op="cfg";} | FilMap {$op="cfm";} | FLim {$op="cflim";} | FLimf {$op="cflf";} | FClus {$op="cfcls";} | TopMod {$op="ctlm";} | ToNrmGrp {$op="ctng";} | PiDIGIT_ONE {$op="cpi1";} | PiN {$op="cpin";} | CMod {$op="cclm";} | CauFil {$op="ccfil";} | Vol {$op="cvol";} | QuotDIGIT_ONEp {$op="cq1p";} | IdlGenDIGIT_ONEp {$op="cig1p";} | Quot {$op="cquot";} | Tayl {$op="ctayl";} | Theta {$op="ccht";} | Ass {$op="cass";} | Fld {$op="cfld";} | BaseSet {$op="cba";} | NormCV {$op="cnmcv";} | DIGIT_ZEROop {$op="c0o";} | Normh {$op="cno";} | Iop {$op="chio";} | ConOp {$op="ccop";} | States {$op="cst";} | BrSiga {$op="cbrsiga";} | EQUALS_SIGNg {$op="cgoq";} | GF_LOW_LINEoo {$op="cgfo";} | Zp {$op="czp";} | Img {$op="cimg";} | Domain {$op="cdomain";} | SegLESS_THAN_SIGN_LOW_LINE {$op="csegle";} | Ray {$op="cray";} | Mxl {$op="cmxl";} | SemiGrpHom {$op="csmhom";} | Tofld {$op="ctofld";} | FLimfrs {$op="cflimfrs";} | TopFld {$op="ctopfld";} | HYPHEN_MINUScv {$op="cmcv";} | Der {$op="cder";} | SumObj {$op="csumo";} | RoSetCat {$op="crocase";} | Notc {$op="cnotc";} | Idl {$op="cidl";} | MaxIdl {$op="cmaxidl";} | LNoeM {$op="clnm";} | LEFT_PARENTHESIS_PLUS_SIGN_RIGHT_PARENTHESISm {$op="cdsmm";} | UnitVec {$op="cuvc";} | MaMul {$op="cmmul";} | ComplUSGrph {$op="ccusgra";} | WalkOn {$op="cwlkon";} | LKer {$op="clk";} | LDual {$op="cld";} | Cm {$op="ccmtN";} | Atoms {$op="catm";} | LOW_LINE_VERTICAL_LINE_LOW_LINEP {$op="cpolN";} | LPol {$op="clpoN";} | LEFT_PARENTHESIS_SOLIDUS_RIGHT_PARENTHESIS {$op="c0";} | FULL_STOPo {$op="comu";} | CIRCUMFLEX_ACCENTpm {$op="cpm";} | TILDE_LESS_THAN_SIGN_LOW_LINE_ASTERISK {$op="cwdom";} | Univ {$op="cgru";} | N_FULL_STOP {$op="cnpi";} | LESS_THAN_SIGNpQ {$op="cltpq";} | ASTERISKQ {$op="crq";} | HYPHEN_MINUS_DIGIT_ONER {$op="cm1r";} | LOW_LINEi {$op="ci";} | 
HYPHEN_MINUS {$op="cmin";} | NN_DIGIT_ZERO {$op="cn0";} | QQ {$op="cq";} | LEFT_PARENTHESIS_COMMA_RIGHT_PARENTHESIS {$op="cioo";} | LEFT_SQUARE_BRACKET_COMMA_RIGHT_SQUARE_BRACKET {$op="cicc";} | FULL_STOP_FULL_STOP_CIRCUMFLEX_ACCENT {$op="cfzo";} | Sqr {$op="csqr";} | Bits {$op="cbits";} | Ndx {$op="cnx";} | Oc {$op="coc";} | Hom {$op="chom";} | OrdTop {$op="cordt";} | Moore {$op="cmre";} | IdA {$op="cida";} | HomF {$op="chof";} | SubMnd {$op="csubmnd";} | VarFMnd {$op="cvrmd";} | SymGrp {$op="csymg";} | Ring {$op="crg";} | OppR {$op="coppr";} | Unit {$op="cui";} | ASTERISKrf {$op="cstf";} | ASTERISKRing {$op="csr";} | TILDE_EQUALS_SIGNm {$op="clmic";} | EvalDIGIT_ONE {$op="ce1";} | CoeDIGIT_ONE {$op="cco1";} | ASTERISKMet {$op="cxmt";} | Ball {$op="cbl";} | Z_SOLIDUSnZ {$op="czn";} | TopSpOLD {$op="ctpsOLD";} | TopSp {$op="ctps";} | Con {$op="ccon";} | DIGIT_TWOndc {$op="c2ndc";} | TopRing {$op="ctrg";} | MetSp {$op="cmt";} | NrmRing {$op="cnrg";} | PHtpy {$op="cphtpy";} | VolASTERISK {$op="covol";} | Ana {$op="cana";} | RPrime {$op="crpm";} | TASTERISK {$op="crtcl";} | Magma {$op="cmagm";} | BLnOp {$op="cblo";} | PLUS_SIGNh {$op="cva";} | TILDE_TILDE_GREATER_THAN_SIGNv {$op="chli";} | CH {$op="cch";} | LOW_LINE_VERTICAL_LINE_LOW_LINE {$op="cort";} | Span {$op="cspn";} | DIGIT_ZEROH {$op="c0h";} | Normop {$op="cnop";} | MH {$op="cmd";} | Logb {$op="clogb";} | SigaGen {$op="csigagen";} | SCon {$op="cscon";} | EFULL_STOPg {$op="cgoe";} | Fmla {$op="cfmla";} | GF {$op="cgf";} | CIRCUMFLEX_ACCENTr {$op="crelexp";} | Colinear {$op="ccolin";} | OutsideOf {$op="coutsideof";} | Pr {$op="cpro";} | OrHom {$op="corhom";} | SubSemiGrp {$op="csubsmg";} | IdlNEW {$op="cidln";} | RVec {$op="cvr";} | PLUS_SIGNm {$op="cmmat";} | DomLOW_LINE {$op="cdom_";} | CodLOW_LINE {$op="ccod_";} | T_r {$op="ctr";} | GraphSetCat {$op="cgraphcase";} | IndCls {$op="clincl";} | HYPHEN_MINUS_FULL_STOPc {$op="cnots";} | Trcng {$op="ctrcng";} | RngHom {$op="crnghom";} | PolyLESS_THAN_SIGN 
{$op="cplylt";} | PmTrsp {$op="cpmtr";} | Cycles {$op="ccycl";} | Tanh {$op="ctanh";} | OML {$op="coml";} | LLines {$op="clln";} | LHyp {$op="clh";} | OcA {$op="cocaN";} | DIsoC {$op="cdic";} | HVMap {$op="chvm";} | LOW_LINEV {$op="cvv";} | LOW_LINEI {$op="cid";} | PLUS_SIGNo {$op="coa";} | CNF {$op="ccnf";} | Rank {$op="crnk";} | FinDIGIT_THREE {$op="cfin3";} | FULL_STOPP_FULL_STOP {$op="cmp";} | TILDER {$op="cer";} | FULL_STOPR {$op="cmr";} | PLUS_SIGN {$op="caddc";} | DIGIT_FIVE {$op="c5";} | DIGIT_ONE_DIGIT_ZERO {$op="c10";} | LOW_LINEe {$op="ceu";} | FULL_STOPr {$op="cmulr";} | XsLOW_LINE {$op="cprds";} | Gsum {$op="cgsu";} | CodA {$op="ccoda";} | DIGIT_TWOndF {$op="c2ndf";} | DLat {$op="cdlat";} | TILDEQG {$op="cqg";} | ProjDIGIT_ONE {$op="cpj1";} | Field {$op="cfield";} | FULL_STOPsf {$op="cscaf";} | AssAlg {$op="casa";} | ToPolyDIGIT_ONE {$op="ctp1";} | ZRHom {$op="czrh";} | Ocv {$op="cocv";} | Fre {$op="ct1";} | Haus {$op="cha";} | CNrm {$op="ccnrm";} | Fil {$op="cfil";} | OmDIGIT_ONE {$op="comi";} | MblFn {$op="cmbf";} | S_FULL_STOP_DIGIT_ONE {$op="citg1";} | MonicDIGIT_ONEp {$op="cmn1";} | RemDIGIT_ONEp {$op="cr1p";} | CIRCUMFLEX_ACCENTc {$op="ccxp";} | Gamma {$op="cem";} | Psi {$op="cchp";} | DChr {$op="cdchr";} | NrmCVec {$op="cnv";} | NormOpOLD {$op="cnmoo";} | C_LOW_LINEH {$op="ccm";} | Projh {$op="cpjh";} | HrmOp {$op="cho";} | Null {$op="cnl";} | ConFn {$op="ccnfn";} | SOLIDUSe {$op="cxdiv";} | Prob {$op="cprb";} | Cprob {$op="ccprob";} | CplMetSp {$op="ccpms";} | LOW_LINEQp {$op="cqpa";} | LinesEE {$op="clines2";} | OrIso {$op="coriso";} | Mnl {$op="cmnl";} | Xm {$op="cxmat";} | UnifSp {$op="cunifsp";} | Frf {$op="cfrf";} | Intvl {$op="cintvl";} | Pc {$op="cPc";} | Triangle {$op="ctriangle";} | Ss {$op="csas";} | Segc {$op="csegc";} | IdlGen {$op="cigen";} | PellDIGIT_ONE_DIGIT_TWO_DIGIT_THREE_DIGIT_FOURQR {$op="cpell1234qr";} | PellFund {$op="cpellfund";} | PLUS_SIGNr {$op="cplusr";} | USGrph {$op="cusg";} | GREATER_THAN_SIGN_LOW_LINE 
{$op="cge-real";} | LVols {$op="clvol";} | PLUS_SIGNP {$op="cpadd";} | PSubCl {$op="cpscN";} | LDil {$op="cldil";} | DIsoA {$op="cdia";} | HLHil {$op="chlh";} | DIGIT_ONEo {$op="c1o";} | FinDIGIT_ONEa {$op="cfin1a";} | FinDIGIT_FOUR {$op="cfin4";} | FinDIGIT_FIVE {$op="cfin5";} | InaccW {$op="cwina";} | Inacc {$op="cina";} | WUniCl {$op="cwunm";} | DIGIT_ONEP {$op="c1p";} | DIGIT_FOUR {$op="c4";} | ZZ {$op="cz";} | PLUS_SIGNe {$op="cxad";} | LEFT_PARENTHESIS_COMMA_RIGHT_SQUARE_BRACKET {$op="cioc";} | NUMBER_SIGN {$op="chash";} | Im {$op="cim";} | Abs {$op="cabs";} | Limsup {$op="clsp";} | Smul {$op="csmu";} | Denom {$op="cdenom";} | PolyAP {$op="cvdwp";} | Unif {$op="cunif";} | TopGen {$op="ctg";} | Cat {$op="ccat";} | C_LOW_LINEcat {$op="cssc";} | DIGIT_ONEstF {$op="c1stf";} | EvalF {$op="cevlf";} | DIGIT_ZERO_FULL_STOP {$op="cp0";} | Mnd {$op="cmnd";} | TILDE_EQUALS_SIGNg {$op="cgic";} | Od {$op="cod";} | Irred {$op="cir";} | LMod {$op="clmod";} | LBasis {$op="clbs";} | DIGIT_TWOIdeal {$op="c2idl";} | LPIdeal {$op="clpidl";} | RLReg {$op="crlreg";} | AlgSpan {$op="casp";} | AlgInd {$op="cai";} | VarDIGIT_ONE {$op="cv1";} | Met {$op="cme";} | FULL_STOPif {$op="cipf";} | LimPt {$op="clp";} | Cn {$op="ccn";} | Comp {$op="ccmp";} | NrmMod {$op="cnlm";} | II {$op="cii";} | TILDE_EQUALS_SIGNph {$op="cphtpc";} | CPreHil {$op="ccph";} | LOW_LINED {$op="cdv";} | Mmu {$op="cmu";} | GrpOpIso {$op="cgiso";} | PLUS_SIGNv {$op="cpv";} | FULL_STOPiOLD {$op="cdip";} | CHilOLD {$op="chlo";} | FULL_STOPih {$op="csp";} | DIGIT_ZEROhop {$op="ch0o";} | UniOp {$op="cuo";} | Ketbra {$op="ck";} | SX {$op="csx";} | MblFnM {$op="cmbfm";} | IblFnM {$op="cibfm";} | CovMap {$op="ccvm";} | Sat {$op="csat";} | SOLIDUS_REVERSE_SOLIDUSg {$op="cgoa";} | PolyFld {$op="cpfl";} | TILDEQp {$op="ceqp";} | Cp {$op="ccp";} | Funs {$op="cfuns";} | Cap {$op="ccap";} | Cgr {$op="ccgr";} | BernPoly {$op="cbp";} | Ge {$op="cge";} | HYPHEN_MINUSucv {$op="cnegcv";} | FULL_STOPcv {$op="csmcv";} | H_om 
{$op="chomOLD";} | Epic {$op="cepiOLD";} | MonoOLD {$op="cmonOLD";} | SubCat {$op="csubcat";} | DomSetCat {$op="cdomcase";} | Ig {$op="cig";} | Seg {$op="cseg";} | Circle {$op="ccircle";} | Fne {$op="cfne";} | Bnd {$op="cbnd";} | CRingOps {$op="ccring";} | PellDIGIT_ONE_DIGIT_FOURQR {$op="cpell14qr";} | RmX {$op="crmx";} | LNoeR {$op="clnr";} | Mat {$op="cmat";} | FULL_STOPv {$op="ctimesr";} | RR_DIGIT_THREE {$op="crr3c";} | UnivVertex {$op="cuvtx";} | FriendGrph {$op="cfrgra";} | OL {$op="col";} | Dil {$op="cdilN";} | JoinH {$op="cdjh";} | DIGIT_ONEst {$op="c1st";} | CIRCUMFLEX_ACCENTm {$op="cmap";} | Fin {$op="cfn";} | FinDIGIT_SEVEN {$op="cfin7";} | GCH {$op="cgch";} | FULL_STOPN {$op="cmi";} | TILDEQ {$op="ceq";} | PLUS_SIGNpR {$op="cplpr";} | RR_ASTERISK {$op="cxr";} | LESS_THAN_SIGN_LOW_LINE {$op="cle";} | NN {$op="cn";} | ZZ_GREATER_THAN_SIGN_EQUALS_SIGN {$op="cuz";} | ASTERISKe {$op="cxmu";} | LOW_LINEC {$op="cbc";} | Shift {$op="cshi";} | ASTERISK {$op="ccj";} | TILDE_TILDE_GREATER_THAN_SIGN {$op="cli";} | Prime {$op="cprime";} | AP {$op="cvdwa";} | Ramsey {$op="cram";} | Base {$op="cbs";} | PLUS_SIGNg {$op="cplusg";} | XtLOW_LINE {$op="cpt";} | SOLIDUSs {$op="cqus";} | Homf {$op="chomf";} | Sect {$op="csect";} | VERTICAL_LINE_GRAVE_ACCENTcat {$op="cresc";} | Full {$op="cful";} | Faith {$op="cfth";} | DomA {$op="cdoma";} | CurryF {$op="ccurf";} | DirRel {$op="cdir";} | Invg {$op="cminusg";} | FULL_STOPg {$op="cmg";} | FreeMnd {$op="cfrmd";} | GrpHom {$op="cghm";} | GEx {$op="cgex";} | TILDEFG {$op="cefg";} | VERTICAL_LINE_VERTICAL_LINEr {$op="cdsr";} | Invr {$op="cinvr";} | RingHom {$op="crh";} | LSubSp {$op="clss";} | NzRing {$op="cnzr";} | IDomn {$op="cidom";} | MVar {$op="cmvr";} | MHomP {$op="cmhp";} | CSubSp {$op="ccss";} | Perf {$op="cperf";} | Reg {$op="creg";} | CIRCUMFLEX_ACCENTko {$op="cxko";} | NrmGrp {$op="cngp";} | NormOp {$op="cnmo";} | NMHom {$op="cnmhm";} | L_CIRCUMFLEX_ACCENT_DIGIT_ONE {$op="cibl";} | Poly {$op="cply";} | Arcsin 
{$op="casin";} | SubGrpOp {$op="csubgo";} | Adj {$op="caj";} | CPreHilOLD {$op="ccphlo";} | HYPHEN_MINUSh {$op="cmv";} | SH {$op="csh";} | REVERSE_SOLIDUS_SOLIDUSH {$op="chsup";} | PLUS_SIGNfn {$op="chfs";} | LOW_LINEInd {$op="cind";} | Zeta {$op="czeta";} | LOW_LINEG {$op="cgam";} | HomLim {$op="chlim";} | Bday {$op="cbday";} | SSet {$op="csset";} | Range {$op="crange";} | Succ {$op="csuccf";} | Hf {$op="chf";} | Cset {$op="ccst";} | LatAlg {$op="clatalg";} | TopX {$op="ctopx";} | Opfn {$op="copfn";} | SOLIDUScv {$op="cdivcv";} | OLOW_LINE {$op="co_";} | Isofunc {$op="cifunc";} | LimCat {$op="clmct";} | Kleene {$op="ckln";} | Conc {$op="cconc";} | REVERSE_SOLIDUS_SOLIDUSc {$op="clors";} | LESS_THAN_SIGN_EQUALS_SIGN_GREATER_THAN_SIGNc {$op="cbis";} | L_ine {$op="cline";} | Btw {$op="cbtw";} | R_ay {$op="cray2";} | Ref {$op="cref";} | PrIdl {$op="cpridl";} | LIndF {$op="clindf";} | MaAdju {$op="cmadu";} | MEndo {$op="cmend";} | SubDRing {$op="csdrg";} | TopLnd {$op="ctoplnd";} | USLGrph {$op="cuslg";} | Csc {$op="ccsc";} | Sgn {$op="csgn";} | LSAtoms {$op="clsa";} | LESS_THAN_SIGNoL {$op="clcv";} | AtLat {$op="cal";} | TEndo {$op="ctendo";} | DVecA {$op="cdveca";} | OcH {$op="coch";} | HDMapDIGIT_ONE {$op="chdma1";} | HDMap {$op="chdma";} | On {$op="con0";} | FinDIGIT_TWO {$op="cfin2";} | FinDIGIT_SIX {$op="cfin6";} | Tarski {$op="ctsk";} | PLUS_SIGNpQ {$op="cplpq";} | PLUS_SIGNQ {$op="cplq";} | P_FULL_STOP {$op="cnp";} | PLUS_SIGNP_FULL_STOP {$op="cpp";} | LESS_THAN_SIGNR {$op="cltr";} | RR {$op="cr";} | XFULL_STOP {$op="cmul";} | DIGIT_THREE {$op="c3";} | RR_PLUS_SIGN {$op="crp";} | VERTICAL_LINE_LOW_LINE {$op="cfl";} | CIRCUMFLEX_ACCENT {$op="cexp";} | LESS_THAN_SIGN_LOW_LINEO_LEFT_PARENTHESIS_DIGIT_ONE_RIGHT_PARENTHESIS {$op="clo1";} | Phi {$op="cphi";} | S_Set {$op="csts";} | Le {$op="cple";} | CIRCUMFLEX_ACCENTs {$op="cpws";} | Mono {$op="cmon";} | Subcat {$op="csubc";} | OFULL_STOPfunc {$op="ccofu";} | Lt {$op="cplt";} | Join {$op="cjn";} | 
DIGIT_ONE_FULL_STOP {$op="cp1";} | MndHom {$op="cmhm";} | SubGrp {$op="csubg";} | NrmSGrp {$op="cnsg";} | Cntz {$op="ccntz";} | CMnd {$op="ccmn";} | Abel {$op="cabel";} | LMIso {$op="clmim";} | SubringAlg {$op="csra";} | RSpan {$op="crsp";} | LPIR {$op="clpir";} | Domn {$op="cdomn";} | AlgSc {$op="cascl";} | MPwSer {$op="cmps";} | MPSDer {$op="cpsd";} | LESS_THAN_SIGNbag {$op="cltb";} | CCfld {$op="ccnfld";} | TopOn {$op="ctopon";} | PNrm {$op="cpnrm";} | DIGIT_ONEstc {$op="c1stc";} | Htpy {$op="chtpy";} | CMet {$op="cms";} | S_FULL_STOP_DIGIT_TWO {$op="citg2";} | Dn {$op="cdvn";} | UnicDIGIT_ONEp {$op="cuc1p";} | Xp {$op="cidp";} | Arccos {$op="cacos";} | Ppi {$op="cppi";} | Sigma {$op="csgm";} | GrpOpHom {$op="cghom";} | DIGIT_ZEROvec {$op="cn0v";} | HYPHEN_MINUSv {$op="cnsb";} | FULL_STOPh {$op="csm";} | VH {$op="chj";} | HYPHEN_MINUSop {$op="chod";} | BndLinOp {$op="cbo";} | Eigvec {$op="cei";} | Lambda {$op="cspc";} | LESS_THAN_SIGNoH {$op="ccv";} | HAtoms {$op="cat";} | MH_ASTERISK {$op="cdmd";} | VERTICAL_LINEg {$op="cgna";} | SatE {$op="csate";} | VERTICAL_LINE_EQUALS_SIGN {$op="cprv";} | AxInf {$op="cgzi";} | ZF {$op="cgzf";} | IntgRing {$op="citr";} | SOLIDUSQp {$op="crqp";} | LESS_THAN_SIGNs {$op="cslt";} | Trans {$op="ctrans";} | Singletons {$op="csingles";} | Cart {$op="ccart";} | EE {$op="cee";} | PLUS_SIGNcv {$op="cplcv";} | CatOLD {$op="ccatOLD";} | IsoOLD {$op="cisoOLD";} | Source {$op="csrce";} | Sink {$op="csnk";} | MorphismSetCat {$op="ccmrcase";} | EQUALS_SIGN_GREATER_THAN_SIGNc {$op="cimps";} | Andc {$op="candc";} | Impc {$op="cimpc";} | Convex {$op="cconvex";} | Ibcg {$op="cibcg";} | Angtrg {$op="cangtrg";} | PtFin {$op="cptfin";} | RngIso {$op="crngiso";} | PellDIGIT_ONEQR {$op="cpell1qr";} | LdgIdlSeq {$op="cldgis";} | DegAA {$op="cdgraa";} | LOW_LINEZZ {$op="cza";} | HYPHEN_MINUSr {$op="cminusr";} | TrailOn {$op="ctrlon";} | Sec {$op="csec";} | Cot {$op="ccot";} | OP {$op="cops";} | Points {$op="cpointsN";} | PSubSp {$op="cpsubsp";} | PCl 
{$op="cpclN";} | WAtoms {$op="cwpointsN";} | PAut {$op="cpautN";} | TrL {$op="ctrl";} | DIsoB {$op="cdib";} ; cC3 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs3";} | TrPred LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="ctrpred";} | LESS_THAN_SIGN_FULL_STOP c COMMA c COMMA c GREATER_THAN_SIGN_FULL_STOP {$op="cotp";} | S_FULL_STOPm c c LOW_LINEd c {$op="citgm";} | LEFT_PARENTHESIS_LEFT_PARENTHESIS c c c RIGHT_PARENTHESIS_RIGHT_PARENTHESIS {$op="caov";} | LOW_LINEpred LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="c-bnj14";} | Pred LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="cpred";} | LEFT_PARENTHESIS c c c RIGHT_PARENTHESIS {$op="co";} | LOW_LINEtrCl LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="c-bnj18";} | Sup LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="csup";} | Seq c LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="cseq";} | LEFT_CURLY_BRACKET c COMMA c COMMA c RIGHT_CURLY_BRACKET {$op="ctp";} ; cC7 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c c c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs7";} ; cSC2 locals[String op] : InfLOW_LINE s EFULL_STOP c c {$op="clinfp";} | X_LOW_LINE s EFULL_STOP c c {$op="cixp";} | LEFT_PARENTHESIS s EFULL_STOP c VERTICAL_LINE_HYPHEN_MINUS_GREATER_THAN_SIGN c RIGHT_PARENTHESIS {$op="cmpt";} | SumASTERISK s EFULL_STOP c c {$op="cesum";} | SupLOW_LINE s EFULL_STOP c c {$op="clsupp";} | VERTICAL_LINE_CIRCUMFLEX_ACCENT_VERTICAL_LINE_LOW_LINE s EFULL_STOP c c {$op="ciin";} | SumLOW_LINE s EFULL_STOP c c {$op="csu";} | U_LOW_LINE s EFULL_STOP c c {$op="ciun";} | ProdLOW_LINE s EFULL_STOP c c {$op="cprod";} ; wW12 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc12";} ; w0 
locals[String op] : F_FULL_STOP {$op="wfal";} | CHOICE {$op="wac";} | T_FULL_STOP {$op="wtru";} ; wW3 locals[String op] : Hadd LEFT_PARENTHESIS w COMMA w COMMA w RIGHT_PARENTHESIS {$op="whad";} | LEFT_PARENTHESIS w REVERSE_SOLIDUS_SOLIDUS w REVERSE_SOLIDUS_SOLIDUS w RIGHT_PARENTHESIS {$op="w3o";} | LEFT_PARENTHESIS w SOLIDUS_REVERSE_SOLIDUS w SOLIDUS_REVERSE_SOLIDUS w RIGHT_PARENTHESIS {$op="w3a";} | LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc3";} | LEFT_PARENTHESIS w HYPHEN_MINUS_SOLIDUS_REVERSE_SOLIDUS w HYPHEN_MINUS_SOLIDUS_REVERSE_SOLIDUS w RIGHT_PARENTHESIS {$op="w3nand";} | Cadd LEFT_PARENTHESIS w COMMA w COMMA w RIGHT_PARENTHESIS {$op="wcad";} | LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w HYPHEN_MINUS_GREATER_THAN_SIGN_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvd2";} ; wS2 locals[String op] : s EFULL_STOP s {$op="wel";} | s EQUALS_SIGN s {$op="weq";} ; cS3W locals[String op] : LEFT_CURLY_BRACKET LESS_THAN_SIGN_FULL_STOP LESS_THAN_SIGN_FULL_STOP s COMMA s GREATER_THAN_SIGN_FULL_STOP COMMA s GREATER_THAN_SIGN_FULL_STOP VERTICAL_LINE w RIGHT_CURLY_BRACKET {$op="coprab";} ; cSCSC2 locals[String op] : LEFT_PARENTHESIS s EFULL_STOP c COMMA s EFULL_STOP c VERTICAL_LINE_HYPHEN_MINUS_GREATER_THAN_SIGN c RIGHT_PARENTHESIS {$op="cmpt2";} ; cC4 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs4";} ; cC5 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs5";} ; wW4 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w HYPHEN_MINUS_GREATER_THAN_SIGN_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvd3";} | LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc4";} | LEFT_PARENTHESIS w SOLIDUS_REVERSE_SOLIDUS w SOLIDUS_REVERSE_SOLIDUS w SOLIDUS_REVERSE_SOLIDUS w RIGHT_PARENTHESIS {$op="w-bnj17";} ; wW6 
locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc6";} ; wW9 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc9";} ; wSCW locals[String op] : E_ASTERISK s EFULL_STOP c w {$op="wrmo";} | E_EXCLAMATION_MARK s EFULL_STOP c w {$op="wreu";} | A_FULL_STOP s EFULL_STOP c w {$op="wral";} | E_FULL_STOP s EFULL_STOP c w {$op="wrex";} ; wS2W locals[String op] : LEFT_SQUARE_BRACKET s SOLIDUS s RIGHT_SQUARE_BRACKET w {$op="wsb";} | CondEq LEFT_PARENTHESIS s EQUALS_SIGN s HYPHEN_MINUS_GREATER_THAN_SIGN w RIGHT_PARENTHESIS {$op="wcdeq";} ; cSCW locals[String op] : LEFT_PARENTHESIS IotaLOW_LINE s EFULL_STOP c w RIGHT_PARENTHESIS {$op="crio";} | LEFT_CURLY_BRACKET s EFULL_STOP c VERTICAL_LINE w RIGHT_CURLY_BRACKET {$op="crab";} ; cSC3 locals[String op] : ProdDIGIT_THREE_LOW_LINE s EFULL_STOP c c c {$op="cgprd3";} | ProdDIGIT_TWO_LOW_LINE s EFULL_STOP c c c {$op="cgprd2";} | GprodLOW_LINE s EFULL_STOP c c c {$op="cgprd";} ; wW8 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc8";} ; wSC locals[String op] : F_SOLIDUS_LOW_LINE s c {$op="wnfc";} ; wCSW locals[String op] : LEFT_SQUARE_BRACKET_FULL_STOP c SOLIDUS s RIGHT_SQUARE_BRACKET_FULL_STOP w {$op="wsbc";} ; wC3 locals[String op] : c COLON c HYPHEN_MINUSontoHYPHEN_MINUS_GREATER_THAN_SIGN c {$op="wfo";} | c COLON c HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_GREATER_THAN_SIGN c {$op="wf1";} | c COLON c HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUSontoHYPHEN_MINUS_GREATER_THAN_SIGN c {$op="wf1o";} | c COLON c 
HYPHEN_MINUS_HYPHEN_MINUS_GREATER_THAN_SIGN c {$op="wf";} | c c c {$op="wbr";} | LOW_LINETrFo LEFT_PARENTHESIS c COMMA c COMMA c RIGHT_PARENTHESIS {$op="w-bnj19";} ; wW5 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc5";} ; wW10 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc10";} ; cC8 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c c c c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs8";} ; wW11 locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc11";} ; cCSC locals[String op] : LEFT_SQUARE_BRACKET_LOW_LINE c SOLIDUS s RIGHT_SQUARE_BRACKET_LOW_LINE c {$op="csb";} ; cSW locals[String op] : LEFT_PARENTHESIS Iota s w RIGHT_PARENTHESIS {$op="cio";} | LEFT_CURLY_BRACKET s VERTICAL_LINE w RIGHT_CURLY_BRACKET {$op="cab";} ; cC3S locals[String op] : S_LOW_LINE LEFT_SQUARE_BRACKET c HYPHEN_MINUS_GREATER_THAN_SIGN c RIGHT_SQUARE_BRACKET c LOW_LINEd s {$op="cdit";} ; cWC2 locals[String op] : If LEFT_PARENTHESIS w COMMA c COMMA c RIGHT_PARENTHESIS {$op="cif";} ; cS2W locals[String op] : LEFT_CURLY_BRACKET LESS_THAN_SIGN_FULL_STOP s COMMA s GREATER_THAN_SIGN_FULL_STOP VERTICAL_LINE w RIGHT_CURLY_BRACKET {$op="copab";} ; cC2S locals[String op] : S_FULL_STOP c c LOW_LINEd s {$op="citg";} ; cS locals[String op] : s {$op="cv";} ; wC5 locals[String op] : c Isom c COMMA c LEFT_PARENTHESIS c COMMA c RIGHT_PARENTHESIS {$op="wiso";} ; cC6 locals[String op] : LESS_THAN_SIGN_QUOTATION_MARK c c c c c c QUOTATION_MARK_GREATER_THAN_SIGN {$op="cs6";} ; wW7 
locals[String op] : LEFT_PARENTHESIS_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w COMMA_FULL_STOP w RIGHT_PARENTHESIS_FULL_STOP {$op="wvhc7";} ; classHolder : VARIABLE_CLASS; setHolder:VARIABLE_SET; wffHolder:VARIABLE_WFF; /* Lexer rules */ VARIABLE_SET : '∨' [0-9]+ 's '; VARIABLE_WFF : '∨' [0-9]+ 'w ' ; VARIABLE_CLASS : '∨' [0-9]+ 'c ' ; AA : 'AA ' ; ACS : 'ACS ' ; AC_LOW_LINE : 'AC_ ' ; AP : 'AP ' ; APOSTROPHE_APOSTROPHE_APOSTROPHE : '\'\'\' ' ; ASTERISK : '* ' ; ASTERISKMet : '*Met ' ; ASTERISKMetSp : '*MetSp ' ; ASTERISKQ : '*Q ' ; ASTERISKRing : '*Ring ' ; ASTERISK_HYPHEN_MINUSFld : '*-Fld ' ; ASTERISKe : '*e ' ; ASTERISKp : '*p ' ; ASTERISKr : '*r ' ; ASTERISKrf : '*rf ' ; A_FULL_STOP : 'A. ' ; A_FULL_STOPg : 'A.g ' ; Abel : 'Abel ' ; AbelOp : 'AbelOp ' ; Abs : 'abs ' ; AbsVal : 'AbsVal ' ; Action : 'Action ' ; Adj : 'adj ' ; Adjh : 'adjh ' ; Aleph : 'aleph ' ; Alg : 'Alg ' ; AlgInd : 'AlgInd ' ; AlgSc : 'algSc ' ; AlgSpan : 'AlgSpan ' ; Ana : 'Ana ' ; Andc : 'andc ' ; Angc : 'angc ' ; Angle : 'angle ' ; Angtrg : 'angtrg ' ; AntiDir : 'AntiDir ' ; Apply : 'Apply ' ; Arccos : 'arccos ' ; Arcsin : 'arcsin ' ; Arctan : 'arctan ' ; Area : 'area ' ; Arrow : 'Arrow ' ; Ass : 'Ass ' ; AssAlg : 'AssAlg ' ; AtLat : 'AtLat ' ; Atoms : 'Atoms ' ; AxExt : 'AxExt ' ; AxInf : 'AxInf ' ; AxPow : 'AxPow ' ; AxReg : 'AxReg ' ; AxRep : 'AxRep ' ; AxUn : 'AxUn ' ; BLnOp : 'BLnOp ' ; Ball : 'ball ' ; Ban : 'Ban ' ; Base : 'Base ' ; BaseSet : 'BaseSet ' ; Bday : 'bday ' ; BernPoly : 'BernPoly ' ; Bic : 'bic ' ; Bigcup : 'Bigcup ' ; Bits : 'bits ' ; Bnd : 'Bnd ' ; BndLat : 'BndLat ' ; BndLinOp : 'BndLinOp ' ; BrSiga : 'BrSiga ' ; Bra : 'bra ' ; Btw : 'btw ' ; Btwn : 'Btwn ' ; CBan : 'CBan ' ; CC : 'CC ' ; CCfld : 'CCfld ' ; CH : 'CH ' ; CHOICE : 'CHOICE ' ; CHStates : 'CHStates ' ; CHil : 'CHil ' ; CHilOLD : 'CHilOLD ' ; CIRCUMFLEX_ACCENT : '^ ' ; CIRCUMFLEX_ACCENTc : '^c ' ; CIRCUMFLEX_ACCENTg : '^g ' ; CIRCUMFLEX_ACCENTko 
: '^ko ' ; CIRCUMFLEX_ACCENTm : '^m ' ; CIRCUMFLEX_ACCENTmd : '^md ' ; CIRCUMFLEX_ACCENTo : '^o ' ; CIRCUMFLEX_ACCENTpm : '^pm ' ; CIRCUMFLEX_ACCENTr : '^r ' ; CIRCUMFLEX_ACCENTs : '^s ' ; CLat : 'CLat ' ; CMet : 'CMet ' ; CMetSp : 'CMetSp ' ; CMnd : 'CMnd ' ; CMod : 'CMod ' ; CNF : 'CNF ' ; CNrm : 'CNrm ' ; COLON : ': ' ; COMMA : ', ' ; COMMA_FULL_STOP : ',. ' ; CPreHil : 'CPreHil ' ; CPreHilOLD : 'CPreHilOLD ' ; CRing : 'CRing ' ; CRingOps : 'CRingOps ' ; CSubSp : 'CSubSp ' ; CVecOLD : 'CVecOLD ' ; C_CIRCUMFLEX_ACCENTn : 'C^n ' ; C_FULL_STOP : 'C. ' ; C_LOW_LINE : 'C_ ' ; C_LOW_LINEH : 'C_H ' ; C_LOW_LINEcat : 'C_cat ' ; C_omp : 'comp ' ; C_on : 'con ' ; Cadd : 'cadd ' ; Cap : 'Cap ' ; Card : 'card ' ; Cart : 'Cart ' ; Cat : 'Cat ' ; CatCat : 'CatCat ' ; CatOLD : 'CatOLD ' ; Cau : 'Cau ' ; CauFil : 'CauFil ' ; Cauchy : 'Cauchy ' ; Ceiling : 'ceiling ' ; Cf : 'cf ' ; Cgr : 'Cgr ' ; CgrDIGIT_THREE : 'Cgr3 ' ; Chr : 'chr ' ; CinvOLD : 'cinvOLD ' ; Circle : 'Circle ' ; Circuits : 'Circuits ' ; Class : 'class ' ; Cls : 'cls ' ; Clsd : 'Clsd ' ; Cm : 'cm ' ; Cn : 'Cn ' ; CnP : 'CnP ' ; Cntr : 'Cntr ' ; Cntz : 'Cntz ' ; CodA : 'codA ' ; CodLOW_LINE : 'cod_ ' ; CodSetCat : 'codSetCat ' ; CoeDIGIT_ONE : 'coe1 ' ; Coeff : 'coeff ' ; Colinear : 'Colinear ' ; Coln : 'coln ' ; ComDIGIT_ONE : 'Com1 ' ; ComDIGIT_TWO : 'Com2 ' ; Comf : 'comf ' ; Comp : 'Comp ' ; CompA : 'compA ' ; ComplUSGrph : 'ComplUSGrph ' ; Con : 'Con ' ; ConFn : 'ConFn ' ; ConOp : 'ConOp ' ; Conc : 'conc ' ; Concat : 'concat ' ; CondEq : 'CondEq ' ; Convex : 'convex ' ; Cos : 'cos ' ; Cosh : 'cosh ' ; Cot : 'cot ' ; CovMap : 'CovMap ' ; Cp : 'Cp ' ; CplMetSp : 'cplMetSp ' ; Cprob : 'cprob ' ; Csc : 'csc ' ; Cset : 'cset ' ; Cup : 'Cup ' ; CurDIGIT_ONE : 'cur1 ' ; CurDIGIT_TWO : 'cur2 ' ; Curry : 'curry ' ; CurryF : 'curryF ' ; Cut : 'Cut ' ; CvLat : 'CvLat ' ; CycGrp : 'CycGrp ' ; Cycles : 'Cycles ' ; CytP : 'CytP ' ; DChr : 'DChr ' ; DIGIT_EIGHT : '8 ' ; DIGIT_FIVE : '5 ' ; DIGIT_FOUR : '4 ' ; DIGIT_FOURo 
: '4o ' ; DIGIT_NINE : '9 ' ; DIGIT_ONE : '1 ' ; DIGIT_ONEP : '1P ' ; DIGIT_ONEQ : '1Q ' ; DIGIT_ONER : '1R ' ; DIGIT_ONE_DIGIT_ZERO : '10 ' ; DIGIT_ONE_FULL_STOP : '1. ' ; DIGIT_ONEo : '1o ' ; DIGIT_ONEr : '1r ' ; DIGIT_ONEst : '1st ' ; DIGIT_ONEstF : '1stF ' ; DIGIT_ONEstc : '1stc ' ; DIGIT_SEVEN : '7 ' ; DIGIT_SIX : '6 ' ; DIGIT_THREE : '3 ' ; DIGIT_THREEo : '3o ' ; DIGIT_TWO : '2 ' ; DIGIT_TWOIdeal : '2Ideal ' ; DIGIT_TWOnd : '2nd ' ; DIGIT_TWOndF : '2ndF ' ; DIGIT_TWOndc : '2ndc ' ; DIGIT_TWOo : '2o ' ; DIGIT_ZERO : '0 ' ; DIGIT_ZEROH : '0H ' ; DIGIT_ZEROR : '0R ' ; DIGIT_ZERO_FULL_STOP : '0. ' ; DIGIT_ZEROcv : '0cv ' ; DIGIT_ZEROg : '0g ' ; DIGIT_ZEROh : '0h ' ; DIGIT_ZEROhop : '0hop ' ; DIGIT_ZEROop : '0op ' ; DIGIT_ZEROp : '0p ' ; DIGIT_ZEROvec : '0vec ' ; DIsoA : 'DIsoA ' ; DIsoB : 'DIsoB ' ; DIsoC : 'DIsoC ' ; DIsoH : 'DIsoH ' ; DLat : 'DLat ' ; DProd : 'DProd ' ; DProj : 'dProj ' ; DVecA : 'DVecA ' ; DVecH : 'DVecH ' ; DWords : 'dWords ' ; Ded : 'Ded ' ; DefAt : 'defAt ' ; Deg : 'deg ' ; DegAA : 'degAA ' ; DegDIGIT_ONE : 'deg1 ' ; Denom : 'denom ' ; Der : 'der ' ; Derv : 'derv ' ; Dgra : 'Dgra ' ; DiagFunc : 'DiagFunc ' ; Dil : 'Dil ' ; Dioph : 'Dioph ' ; DirRel : 'DirRel ' ; Dirset : 'Dirset ' ; DisjLOW_LINE : 'Disj_ ' ; Dist : 'dist ' ; DivRing : 'DivRing ' ; DivRingOps : 'DivRingOps ' ; Dmn : 'Dmn ' ; Dn : 'Dn ' ; Dom : 'dom ' ; DomA : 'domA ' ; DomLOW_LINE : 'dom_ ' ; DomSetCat : 'domSetCat ' ; Domain : 'Domain ' ; Domn : 'Domn ' ; EDRing : 'EDRing ' ; EDRingR : 'EDRingR ' ; EE : 'EE ' ; EFULL_STOP : 'e. ' ; EFULL_STOPg : 'e.g ' ; EQUALS_SIGN : '= ' ; EQUALS_SIGN_EQUALS_SIGN : '== ' ; EQUALS_SIGN_GREATER_THAN_SIGNc : '=>c ' ; EQUALS_SIGN_SOLIDUS_EQUALS_SIGN : '=/= ' ; EQUALS_SIGNg : '=g ' ; ESOLIDUS : 'e/ ' ; EXCLAMATION_MARK : '! ' ; E_ASTERISK : 'E* ' ; E_EXCLAMATION_MARK : 'E! ' ; E_FULL_STOP : 'E. 
' ; E_FULL_STOPg : 'E.g ' ; Eigval : 'eigval ' ; Eigvec : 'eigvec ' ; Epi : 'Epi ' ; Epic : 'Epic ' ; Er : 'Er ' ; EulPaths : 'EulPaths ' ; Eval : 'eval ' ; EvalDIGIT_ONE : 'eval1 ' ; EvalF : 'evalF ' ; EvalSub : 'evalSub ' ; EvalSubDIGIT_ONE : 'evalSub1 ' ; ExId : 'ExId ' ; Exp : 'exp ' ; FBas : 'fBas ' ; FClus : 'fClus ' ; FClusf : 'fClusf ' ; FLim : 'fLim ' ; FLimf : 'fLimf ' ; FLimfrs : 'fLimfrs ' ; FULL_STOP : '. ' ; FULL_STOPN : '.N ' ; FULL_STOPP_FULL_STOP : '.P. ' ; FULL_STOPQ : '.Q ' ; FULL_STOPR : '.R ' ; FULL_STOP_FULL_STOP_CIRCUMFLEX_ACCENT : '..^ ' ; FULL_STOP_FULL_STOP_FULL_STOP : '... ' ; FULL_STOPcv : '.cv ' ; FULL_STOPfn : '.fn ' ; FULL_STOPg : '.g ' ; FULL_STOPh : '.h ' ; FULL_STOPi : '.i ' ; FULL_STOPiOLD : '.iOLD ' ; FULL_STOPif : '.if ' ; FULL_STOPih : '.ih ' ; FULL_STOPm : '.m ' ; FULL_STOPo : '.o ' ; FULL_STOPop : '.op ' ; FULL_STOPpQ : '.pQ ' ; FULL_STOPpR : '.pR ' ; FULL_STOPr : '.r ' ; FULL_STOPs : '.s ' ; FULL_STOPsOLD : '.sOLD ' ; FULL_STOPsf : '.sf ' ; FULL_STOPv : '.v ' ; F_FULL_STOP : 'F. 
' ; F_SOLIDUS : 'F/ ' ; F_SOLIDUS_LOW_LINE : 'F/_ ' ; Faith : 'Faith ' ; Fi : 'fi ' ; Field : 'Field ' ; Fil : 'Fil ' ; FilGen : 'filGen ' ; FilMap : 'FilMap ' ; Fin : 'Fin ' ; FinDIGIT_FIVE : 'Fin5 ' ; FinDIGIT_FOUR : 'Fin4 ' ; FinDIGIT_ONEa : 'Fin1a ' ; FinDIGIT_SEVEN : 'Fin7 ' ; FinDIGIT_SIX : 'Fin6 ' ; FinDIGIT_THREE : 'Fin3 ' ; FinDIGIT_TWO : 'Fin2 ' ; FiveSeg : 'FiveSeg ' ; Fix : 'Fix ' ; Fld : 'Fld ' ; Fmla : 'Fmla ' ; Fn : 'Fn ' ; Fne : 'Fne ' ; Fr : 'Fr ' ; Fre : 'Fre ' ; FreeGrp : 'freeGrp ' ; FreeLMod : 'freeLMod ' ; FreeMnd : 'freeMnd ' ; FreeSemiGrp : 'FreeSemiGrp ' ; Frf : 'Frf ' ; FriendGrph : 'FriendGrph ' ; Full : 'Full ' ; FullFun : 'FullFun ' ; Fun : 'Fun ' ; Func : 'Func ' ; FuncCat : 'FuncCat ' ; FuncOLD : 'FuncOLD ' ; Funpart : 'Funpart ' ; Funs : 'Funs ' ; GCH : 'GCH ' ; GEx : 'gEx ' ; GF : 'GF ' ; GF_LOW_LINEoo : 'GF_oo ' ; GId : 'GId ' ; GRAVE_ACCENT : '` ' ; GRAVE_ACCENT_APOSTROPHE : '`\' ' ; GREATER_THAN_SIGN : '> ' ; GREATER_THAN_SIGN_FULL_STOP : '>. ' ; GREATER_THAN_SIGN_GREATER_THAN_SIGN : '>> ' ; GREATER_THAN_SIGN_LOW_LINE : '>_ ' ; Gamma : 'gamma ' ; Gcd : 'gcd ' ; GcdOLD : 'gcdOLD ' ; Ge : 'ge ' ; Glb : 'glb ' ; GprodLOW_LINE : 'gprod_ ' ; Grammar : 'Grammar ' ; GraphSetCat : 'graphSetCat ' ; Grp : 'Grp ' ; GrpAct : 'GrpAct ' ; GrpHom : 'GrpHom ' ; GrpIso : 'GrpIso ' ; GrpOp : 'GrpOp ' ; GrpOpHom : 'GrpOpHom ' ; GrpOpIso : 'GrpOpIso ' ; Gsum : 'gsum ' ; HAtoms : 'HAtoms ' ; HDMap : 'HDMap ' ; HDMapDIGIT_ONE : 'HDMap1 ' ; HGMap : 'HGMap ' ; HL : 'HL ' ; HLHil : 'HLHil ' ; HVMap : 'HVMap ' ; HYPHEN_MINUS : '- ' ; HYPHEN_MINUS_DIGIT_ONER : '-1R ' ; HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_GREATER_THAN_SIGN : '-1-1-> ' ; HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUS_DIGIT_ONE_HYPHEN_MINUSontoHYPHEN_MINUS_GREATER_THAN_SIGN : '-1-1-onto-> ' ; HYPHEN_MINUS_FULL_STOP : '-. ' ; HYPHEN_MINUS_FULL_STOPc : '-.c ' ; HYPHEN_MINUS_FULL_STOPg : '-.g ' ; HYPHEN_MINUS_GREATER_THAN_SIGN : '-> ' ; HYPHEN_MINUS_GREATER_THAN_SIGN_FULL_STOP : '->. 
' ; HYPHEN_MINUS_GREATER_THAN_SIGN_FULL_STOP_FULL_STOP : '->.. ' ; HYPHEN_MINUS_GREATER_THAN_SIGNg : '->g ' ; HYPHEN_MINUS_HYPHEN_MINUS_GREATER_THAN_SIGN : '--> ' ; HYPHEN_MINUS_SOLIDUS_REVERSE_SOLIDUS : '-/\\ ' ; HYPHEN_MINUScnHYPHEN_MINUS_GREATER_THAN_SIGN : '-cn-> ' ; HYPHEN_MINUScv : '-cv ' ; HYPHEN_MINUSe : '-e ' ; HYPHEN_MINUSg : '-g ' ; HYPHEN_MINUSh : '-h ' ; HYPHEN_MINUSontoHYPHEN_MINUS_GREATER_THAN_SIGN : '-onto-> ' ; HYPHEN_MINUSoo : '-oo ' ; HYPHEN_MINUSop : '-op ' ; HYPHEN_MINUSr : '-r ' ; HYPHEN_MINUSu : '-u ' ; HYPHEN_MINUSucv : '-ucv ' ; HYPHEN_MINUSv : '-v ' ; H_om : 'hom ' ; Hadd : 'hadd ' ; Halfplane : 'Halfplane ' ; Har : 'har ' ; Haus : 'Haus ' ; Hf : 'Hf ' ; Hil : 'Hil ' ; HmOp : 'HmOp ' ; Hom : 'Hom ' ; HomA : 'HomA ' ; HomF : 'HomF ' ; HomLim : 'HomLim ' ; HomLimB : 'HomLimB ' ; Homeo : 'Homeo ' ; Homf : 'Homf ' ; HrmOp : 'HrmOp ' ; Htpy : 'Htpy ' ; ICIRCUMFLEX_ACCENTi : 'i^i ' ; IDomn : 'IDomn ' ; II : 'II ' ; I_nv : 'inv ' ; Ibcg : 'Ibcg ' ; Ibg : 'Ibg ' ; IblFnM : 'IblFnM ' ; Id : 'Id ' ; IdA : 'IdA ' ; IdFunc : 'idFunc ' ; IdLOW_LINE : 'id_ ' ; IdSetCat : 'IdSetCat ' ; Idl : 'Idl ' ; IdlGen : 'IdlGen ' ; IdlGenDIGIT_ONEp : 'idlGen1p ' ; IdlNEW : 'IdlNEW ' ; If : 'if ' ; Ig : 'Ig ' ; Im : 'Im ' ; Image : 'Image ' ; Img : 'Img ' ; Impc : 'impc ' ; Inacc : 'Inacc ' ; InaccW : 'InaccW ' ; IndCls : 'IndCls ' ; IndMet : 'IndMet ' ; InfLOW_LINE : 'inf_ ' ; Infw : 'infw ' ; InitObj : 'InitObj ' ; InnerFiveSeg : 'InnerFiveSeg ' ; Int : 'int ' ; IntgOver : 'IntgOver ' ; IntgRing : 'IntgRing ' ; Intvl : 'Intvl ' ; Inv : 'Inv ' ; Invg : 'invg ' ; Invr : 'invr ' ; Iop : 'Iop ' ; Iota : 'iota ' ; IotaLOW_LINE : 'iota_ ' ; Irred : 'Irred ' ; Ismty : 'Ismty ' ; Iso : 'Iso ' ; IsoOLD : 'IsoOLD ' ; Isofunc : 'Isofunc ' ; IsolatedPt : 'IsolatedPt ' ; Isom : 'Isom ' ; Join : 'join ' ; JoinH : 'joinH ' ; KGen : 'kGen ' ; KQ : 'KQ ' ; Ketbra : 'ketbra ' ; Kleene : 'Kleene ' ; KolDIGIT_TWO : 'Kol2 ' ; LAut : 'LAut ' ; LBasis : 'LBasis ' ; LCDual : 'LCDual ' ; 
LDil : 'LDil ' ; LDual : 'LDual ' ; LEFT_CURLY_BRACKET : '{ ' ; LEFT_PARENTHESIS : '( ' ; LEFT_PARENTHESIS_COMMA_RIGHT_PARENTHESIS : '(,) ' ; LEFT_PARENTHESIS_COMMA_RIGHT_SQUARE_BRACKET : '(,] ' ; LEFT_PARENTHESIS_FULL_STOP : '(. ' ; LEFT_PARENTHESIS_LEFT_PARENTHESIS : '(( ' ; LEFT_PARENTHESIS_PLUS_SIGN_PLUS_SIGN_RIGHT_PARENTHESIS : '(++) ' ; LEFT_PARENTHESIS_PLUS_SIGN_RIGHT_PARENTHESISm : '(+)m ' ; LEFT_PARENTHESIS_RIGHT_PARENTHESIS : '() ' ; LEFT_PARENTHESIS_SOLIDUS_RIGHT_PARENTHESIS : '(/) ' ; LEFT_PARENTHESISxRIGHT_PARENTHESIS : '(x) ' ; LEFT_SQUARE_BRACKET : '[ ' ; LEFT_SQUARE_BRACKETC_FULL_STOP_RIGHT_SQUARE_BRACKET : '[C.] ' ; LEFT_SQUARE_BRACKET_COMMA_RIGHT_PARENTHESIS : '[,) ' ; LEFT_SQUARE_BRACKET_COMMA_RIGHT_SQUARE_BRACKET : '[,] ' ; LEFT_SQUARE_BRACKET_FULL_STOP : '[. ' ; LEFT_SQUARE_BRACKET_FULL_STOP_RIGHT_SQUARE_BRACKET : '[.] ' ; LEFT_SQUARE_BRACKET_LOW_LINE : '[_ ' ; LEFT_SQUARE_BRACKET_RIGHT_SQUARE_BRACKETNN : '[]NN ' ; LESS_THAN_SIGN : '< ' ; LESS_THAN_SIGNN : '<N ' ; LESS_THAN_SIGNP : '<P ' ; LESS_THAN_SIGNQ : '<Q ' ; LESS_THAN_SIGNR : '<R ' ; LESS_THAN_SIGNRR : '<RR ' ; LESS_THAN_SIGN_EQUALS_SIGN_GREATER_THAN_SIGNc : '<=>c ' ; LESS_THAN_SIGN_FULL_STOP : '<. 
' ; LESS_THAN_SIGN_GREATER_THAN_SIGN : '<> ' ; LESS_THAN_SIGN_HYPHEN_MINUS_GREATER_THAN_SIGN : '<-> ' ; LESS_THAN_SIGN_HYPHEN_MINUS_GREATER_THAN_SIGNg : '<->g ' ; LESS_THAN_SIGN_LESS_THAN_SIGN : '<< ' ; LESS_THAN_SIGN_LOW_LINE : '<_ ' ; LESS_THAN_SIGN_LOW_LINEO_LEFT_PARENTHESIS_DIGIT_ONE_RIGHT_PARENTHESIS : '<_O(1) ' ; LESS_THAN_SIGN_LOW_LINEop : '<_op ' ; LESS_THAN_SIGN_QUOTATION_MARK : '<" ' ; LESS_THAN_SIGNbag : '<bag ' ; LESS_THAN_SIGNo : '<o ' ; LESS_THAN_SIGNoH : '<oH ' ; LESS_THAN_SIGNoL : '<oL ' ; LESS_THAN_SIGNpQ : '<pQ ' ; LESS_THAN_SIGNs : '<s ' ; LFinGen : 'LFinGen ' ; LFnl : 'LFnl ' ; LHyp : 'LHyp ' ; LIdeal : 'LIdeal ' ; LIndF : 'LIndF ' ; LIndS : 'LIndS ' ; LKer : 'LKer ' ; LLines : 'LLines ' ; LMHom : 'LMHom ' ; LMIso : 'LMIso ' ; LMod : 'LMod ' ; LNoeM : 'LNoeM ' ; LNoeR : 'LNoeR ' ; LOW_LINE : '_ ' ; LOW_LINEC : '_C ' ; LOW_LINED : '_D ' ; LOW_LINEE : '_E ' ; LOW_LINEFrSe : '_FrSe ' ; LOW_LINEG : '_G ' ; LOW_LINEI : '_I ' ; LOW_LINEInd : '_Ind ' ; LOW_LINEQp : '_Qp ' ; LOW_LINESe : '_Se ' ; LOW_LINETrFo : '_TrFo ' ; LOW_LINEV : '_V ' ; LOW_LINEZZ : '_ZZ ' ; LOW_LINE_VERTICAL_LINE_LOW_LINE : '_|_ ' ; LOW_LINE_VERTICAL_LINE_LOW_LINEP : '_|_P ' ; LOW_LINE_VERTICAL_LINE_LOW_LINEc : '_|_c ' ; LOW_LINEd : '_d ' ; LOW_LINEe : '_e ' ; LOW_LINEi : '_i ' ; LOW_LINEpred : '_pred ' ; LOW_LINEtrCl : '_trCl ' ; LPIR : 'LPIR ' ; LPIdeal : 'LPIdeal ' ; LPlanes : 'LPlanes ' ; LPol : 'LPol ' ; LSAtoms : 'LSAtoms ' ; LSHyp : 'LSHyp ' ; LSSum : 'LSSum ' ; LSpan : 'LSpan ' ; LSubSp : 'LSubSp ' ; LTrn : 'LTrn ' ; LVec : 'LVec ' ; LVols : 'LVols ' ; L_CIRCUMFLEX_ACCENT_DIGIT_ONE : 'L^1 ' ; L_ine : 'line ' ; Lam : 'Lam ' ; Lambda : 'Lambda ' ; Lat : 'Lat ' ; LatAlg : 'LatAlg ' ; LatRel : 'LatRel ' ; Lb : 'lb ' ; LdgIdlSeq : 'ldgIdlSeq ' ; Le : 'le ' ; LeR : 'leR ' ; Lim : 'Lim ' ; LimCC : 'limCC ' ; LimCat : 'LimCat ' ; LimPt : 'limPt ' ; Limits : 'Limits ' ; Limsup : 'limsup ' ; LinFn : 'LinFn ' ; LinOp : 'LinOp ' ; Line : 'Line ' ; LineDIGIT_THREE : 'line3 ' ; Lines : 
'Lines ' ; LinesEE : 'LinesEE ' ; LnOp : 'LnOp ' ; LocFin : 'LocFin ' ; Locally : 'Locally ' ; Log : 'log ' ; LogLOW_LINE : 'log_ ' ; LogLOW_LINEG : 'log_G ' ; Logb : 'logb ' ; Lt : 'lt ' ; Lub : 'lub ' ; MDeg : 'mDeg ' ; MEndo : 'MEndo ' ; MH : 'MH ' ; MH_ASTERISK : 'MH* ' ; MHomP : 'mHomP ' ; MPSDer : 'mPSDer ' ; MPoly : 'mPoly ' ; MPwSer : 'mPwSer ' ; MVar : 'mVar ' ; MaAdju : 'maAdju ' ; MaDet : 'maDet ' ; MaMul : 'maMul ' ; Magma : 'Magma ' ; Mapd : 'mapd ' ; Mat : 'Mat ' ; MaxIdl : 'MaxIdl ' ; MblFn : 'MblFn ' ; MblFnM : 'MblFnM ' ; Measures : 'measures ' ; Meet : 'meet ' ; Met : 'Met ' ; MetOpen : 'MetOpen ' ; MetSp : 'MetSp ' ; MinPolyAA : 'minPolyAA ' ; Mmu : 'mmu ' ; Mnd : 'Mnd ' ; MndHom : 'MndHom ' ; MndOp : 'MndOp ' ; Mnl : 'mnl ' ; Mod : 'mod ' ; Monic : 'Monic ' ; MonicDIGIT_ONEp : 'Monic1p ' ; Mono : 'Mono ' ; MonoAP : 'MonoAP ' ; MonoOLD : 'MonoOLD ' ; Moore : 'Moore ' ; MorphismSetCat : 'MorphismSetCat ' ; MrCls : 'mrCls ' ; MrInd : 'mrInd ' ; MulGrp : 'mulGrp ' ; Mxl : 'mxl ' ; MzPoly : 'mzPoly ' ; MzPolyCld : 'mzPolyCld ' ; NGHom : 'NGHom ' ; NMHom : 'NMHom ' ; NN : 'NN ' ; NN_DIGIT_ZERO : 'NN0 ' ; NUMBER_SIGN : '# ' ; N_FULL_STOP : 'N. ' ; N_HYPHEN_MINUSLocally : 'N-Locally ' ; Nat : 'Nat ' ; Natural : 'Natural ' ; Ndx : 'ndx ' ; Nei : 'nei ' ; Neighbors : 'Neighbors ' ; Neug : 'Neug ' ; No : 'No ' ; NoeACS : 'NoeACS ' ; Norm : 'norm ' ; NormCV : 'normCV ' ; NormOp : 'normOp ' ; NormOpOLD : 'normOpOLD ' ; Normfn : 'normfn ' ; Normh : 'normh ' ; Normop : 'normop ' ; Notc : 'notc ' ; Nrm : 'Nrm ' ; NrmCVec : 'NrmCVec ' ; NrmGrp : 'NrmGrp ' ; NrmMod : 'NrmMod ' ; NrmRing : 'NrmRing ' ; NrmSGrp : 'NrmSGrp ' ; NrmVec : 'NrmVec ' ; Null : 'null ' ; Numer : 'numer ' ; NzRing : 'NzRing ' ; OBasis : 'OBasis ' ; ODual : 'ODual ' ; OF : 'oF ' ; OFC : 'oFC ' ; OFULL_STOP : 'o. 
' ; OFULL_STOPfunc : 'o.func ' ; OL : 'OL ' ; OLOW_LINE : 'o_ ' ; OML : 'OML ' ; OP : 'OP ' ; OR : 'oR ' ; ORVC : 'oRVC ' ; O_LEFT_PARENTHESIS_DIGIT_ONE_RIGHT_PARENTHESIS : 'O(1) ' ; Oc : 'oc ' ; OcA : 'ocA ' ; OcH : 'ocH ' ; Ocv : 'ocv ' ; Od : 'od ' ; OdZ : 'odZ ' ; Om : 'om ' ; OmDIGIT_ONE : 'Om1 ' ; OmN : 'OmN ' ; On : 'On ' ; Opfn : 'opfn ' ; OppCat : 'oppCat ' ; OppG : 'oppG ' ; OppR : 'oppR ' ; Or : 'Or ' ; OrHom : 'OrHom ' ; OrIso : 'OrIso ' ; Ord : 'Ord ' ; OrdIso : 'OrdIso ' ; OrdPwSer : 'ordPwSer ' ; OrdTop : 'ordTop ' ; Ors : 'ors ' ; OuterFiveSeg : 'OuterFiveSeg ' ; OutsideOf : 'OutsideOf ' ; PAut : 'PAut ' ; PCl : 'PCl ' ; PCnt : 'pCnt ' ; PCon : 'PCon ' ; PGrp : 'pGrp ' ; PHtpy : 'PHtpy ' ; PID : 'PID ' ; PLUS_SIGN : '+ ' ; PLUS_SIGNH : '+H ' ; PLUS_SIGNN : '+N ' ; PLUS_SIGNP : '+P ' ; PLUS_SIGNP_FULL_STOP : '+P. ' ; PLUS_SIGNQ : '+Q ' ; PLUS_SIGNR : '+R ' ; PLUS_SIGN_HYPHEN_MINUS : '+- ' ; PLUS_SIGNc : '+c ' ; PLUS_SIGNcv : '+cv ' ; PLUS_SIGNe : '+e ' ; PLUS_SIGNf : '+f ' ; PLUS_SIGNfn : '+fn ' ; PLUS_SIGNg : '+g ' ; PLUS_SIGNh : '+h ' ; PLUS_SIGNm : '+m ' ; PLUS_SIGNo : '+o ' ; PLUS_SIGNoo : '+oo ' ; PLUS_SIGNop : '+op ' ; PLUS_SIGNpQ : '+pQ ' ; PLUS_SIGNpR : '+pR ' ; PLUS_SIGNr : '+r ' ; PLUS_SIGNv : '+v ' ; PLines : 'PLines ' ; PNrm : 'PNrm ' ; PPoints : 'PPoints ' ; PSubCl : 'PSubCl ' ; PSubSp : 'PSubSp ' ; PSyl : 'pSyl ' ; P_FULL_STOP : 'P. 
' ; PairF : 'pairF ' ; PathOn : 'PathOn ' ; Paths : 'Paths ' ; Pc : 'Pc ' ; PellDIGIT_ONEQR : 'Pell1QR ' ; PellDIGIT_ONE_DIGIT_FOURQR : 'Pell14QR ' ; PellDIGIT_ONE_DIGIT_TWO_DIGIT_THREE_DIGIT_FOURQR : 'Pell1234QR ' ; PellFund : 'PellFund ' ; Perf : 'Perf ' ; Phc : 'phc ' ; Phi : 'phi ' ; Pi : 'pi ' ; PiDIGIT_ONE : 'pi1 ' ; PiN : 'piN ' ; Plig : 'Plig ' ; PmSgn : 'pmSgn ' ; PmTrsp : 'pmTrsp ' ; Pmap : 'pmap ' ; Po : 'Po ' ; Points : 'Points ' ; Poly : 'Poly ' ; PolyAP : 'PolyAP ' ; PolyDIGIT_ONE : 'Poly1 ' ; PolyFld : 'polyFld ' ; PolyLESS_THAN_SIGN : 'Poly< ' ; PolySplitLim : 'polySplitLim ' ; Poset : 'Poset ' ; PosetRel : 'PosetRel ' ; Ppi : 'ppi ' ; Pprod : 'pprod ' ; Pr : 'pr ' ; PrIdl : 'PrIdl ' ; PrRing : 'PrRing ' ; Prdct : 'prdct ' ; PreHil : 'PreHil ' ; Pred : 'Pred ' ; Preset : 'Preset ' ; PresetRel : 'PresetRel ' ; Prime : 'Prime ' ; Prj : 'prj ' ; Prob : 'Prob ' ; ProdDIGIT_THREE_LOW_LINE : 'prod3_ ' ; ProdDIGIT_TWO_LOW_LINE : 'prod2_ ' ; ProdLOW_LINE : 'prod_ ' ; ProdObj : 'ProdObj ' ; Proj : 'proj ' ; ProjDIGIT_ONE : 'proj1 ' ; Projh : 'projh ' ; Prop : 'Prop ' ; Provable : '|- ' ; Prt : 'Prt ' ; Psc : 'psc ' ; Psi : 'psi ' ; PtDf : 'PtDf ' ; PtFin : 'PtFin ' ; PwSerDIGIT_ONE : 'PwSer1 ' ; QQ : 'QQ ' ; QTop : 'qTop ' ; QUOTATION_MARK : '" ' ; QUOTATION_MARK_GREATER_THAN_SIGN : '"> ' ; QUOTATION_MARKs : '"s ' ; Q_FULL_STOP : 'Q. ' ; Qp : 'Qp ' ; Quot : 'quot ' ; QuotDIGIT_ONEp : 'quot1p ' ; RAffSp : 'RAffSp ' ; REVERSE_SOLIDUS : '\\ ' ; REVERSE_SOLIDUS_SOLIDUS : '\\/ ' ; REVERSE_SOLIDUS_SOLIDUSH : '\\/H ' ; REVERSE_SOLIDUS_SOLIDUS_LOW_LINE : '\\/_ ' ; REVERSE_SOLIDUS_SOLIDUSc : '\\/c ' ; REVERSE_SOLIDUS_SOLIDUSg : '\\/g ' ; RIGHT_CURLY_BRACKET : '} ' ; RIGHT_PARENTHESIS : ') ' ; RIGHT_PARENTHESIS_FULL_STOP : '). ' ; RIGHT_PARENTHESIS_RIGHT_PARENTHESIS : ')) ' ; RIGHT_SQUARE_BRACKET : '] ' ; RIGHT_SQUARE_BRACKET_FULL_STOP : ']. 
' ; RIGHT_SQUARE_BRACKET_LOW_LINE : ']_ ' ; RLReg : 'RLReg ' ; RPrime : 'RPrime ' ; RR : 'RR ' ; RR_ASTERISK : 'RR* ' ; RR_ASTERISKs : 'RR*s ' ; RR_DIGIT_THREE : 'RR3 ' ; RR_PLUS_SIGN : 'RR+ ' ; RRndVar : 'rRndVar ' ; RSpan : 'RSpan ' ; RVec : 'RVec ' ; R_DIGIT_ONE : 'R1 ' ; R_FULL_STOP : 'R. ' ; R_ay : 'ray ' ; Ramsey : 'Ramsey ' ; Ran : 'ran ' ; Range : 'Range ' ; Rank : 'rank ' ; Ray : 'Ray ' ; Re : 'Re ' ; Rec : 'rec ' ; Recs : 'recs ' ; Ref : 'Ref ' ; Reg : 'Reg ' ; Rel : 'Rel ' ; RemDIGIT_ONEp : 'rem1p ' ; Restrict : 'Restrict ' ; Retr : 'Retr ' ; Reverse : 'reverse ' ; Ring : 'Ring ' ; RingHom : 'RingHom ' ; RingIso : 'RingIso ' ; RingLMod : 'ringLMod ' ; RingOps : 'RingOps ' ; RingSpan : 'RingSpan ' ; RmX : 'rmX ' ; RmY : 'rmY ' ; Rn : 'Rn ' ; RngHom : 'RngHom ' ; RngIso : 'RngIso ' ; RoSetCat : 'roSetCat ' ; SCon : 'SCon ' ; SEMICOLON : '; ' ; SH : 'SH ' ; SOLIDUS : '/ ' ; SOLIDUSL : '/L ' ; SOLIDUSQ : '/Q ' ; SOLIDUSQp : '/Qp ' ; SOLIDUS_FULL_STOP : '/. ' ; SOLIDUS_REVERSE_SOLIDUS : '/\\ ' ; SOLIDUS_REVERSE_SOLIDUSc : '/\\c ' ; SOLIDUS_REVERSE_SOLIDUSg : '/\\g ' ; SOLIDUScv : '/cv ' ; SOLIDUSe : '/e ' ; SOLIDUSg : '/g ' ; SOLIDUSr : '/r ' ; SOLIDUSs : '/s ' ; SPaths : 'SPaths ' ; SSet : 'SSet ' ; SX : 'sX ' ; S_FULL_STOP : 'S. 
' ; S_FULL_STOP_DIGIT_ONE : 'S.1 ' ; S_FULL_STOP_DIGIT_TWO : 'S.2 ' ; S_FULL_STOPm : 'S.m ' ; S_LOW_LINE : 'S_ ' ; S_Set : 'sSet ' ; Sadd : 'sadd ' ; Sat : 'Sat ' ; SatE : 'SatE ' ; Scalar : 'Scalar ' ; Se : 'Se ' ; Sec : 'sec ' ; Sect : 'Sect ' ; Seg : 'seg ' ; SegLESS_THAN_SIGN_LOW_LINE : 'Seg<_ ' ; Segc : 'segc ' ; Segments : 'Segments ' ; Segtrg : 'segtrg ' ; SelectVars : 'selectVars ' ; SemiGrp : 'SemiGrp ' ; SemiGrpHom : 'SemiGrpHom ' ; Seq : 'seq ' ; Seqom : 'seqom ' ; Set : 'set ' ; SetCat : 'SetCat ' ; SetCatOLD : 'SetCatOLD ' ; Sgn : 'sgn ' ; Shift : 'shift ' ; SigAlgebra : 'sigAlgebra ' ; SigaGen : 'sigaGen ' ; Sigma : 'sigma ' ; Sin : 'sin ' ; Singleton : 'Singleton ' ; Singletons : 'Singletons ' ; Sinh : 'sinh ' ; Sink : 'Sink ' ; Slices : 'slices ' ; Slot : 'Slot ' ; Smo : 'Smo ' ; Smul : 'smul ' ; Source : 'Source ' ; Span : 'span ' ; Splice : 'splice ' ; SplitFld : 'splitFld ' ; SplitFldDIGIT_ONE : 'splitFld1 ' ; Sqr : 'sqr ' ; Ss : 'ss ' ; States : 'States ' ; Struct : 'Struct ' ; SubCat : 'SubCat ' ; SubDRing : 'SubDRing ' ; SubGrp : 'SubGrp ' ; SubGrpOp : 'SubGrpOp ' ; SubMnd : 'SubMnd ' ; SubRing : 'SubRing ' ; SubSemiGrp : 'SubSemiGrp ' ; SubSemiGrpGen : 'subSemiGrpGen ' ; SubSp : 'SubSp ' ; SubVec : 'SubVec ' ; Subcat : 'Subcat ' ; SubringAlg : 'subringAlg ' ; Substr : 'substr ' ; Suc : 'suc ' ; Succ : 'Succ ' ; SumASTERISK : 'sum* ' ; SumLOW_LINE : 'sum_ ' ; SumObj : 'SumObj ' ; Sup : 'sup ' ; SupLOW_LINE : 'sup_ ' ; Supw : 'supw ' ; Sym : 'sym ' ; SymGrp : 'SymGrp ' ; TASTERISK : 't* ' ; TASTERISKrec : 't*rec ' ; TC : 'TC ' ; TEndo : 'TEndo ' ; TGrp : 'TGrp ' ; TILDEFG : '~FG ' ; TILDEH : '~H ' ; TILDEP : '~P ' ; TILDEQ : '~Q ' ; TILDEQG : '~QG ' ; TILDEQp : '~Qp ' ; TILDER : '~R ' ; TILDE_EQUALS_SIGN : '~= ' ; TILDE_EQUALS_SIGNg : '~=g ' ; TILDE_EQUALS_SIGNm : '~=m ' ; TILDE_EQUALS_SIGNph : '~=ph ' ; TILDE_EQUALS_SIGNr : '~=r ' ; TILDE_LESS_THAN_SIGN : '~< ' ; TILDE_LESS_THAN_SIGN_LOW_LINE : '~<_ ' ; TILDE_LESS_THAN_SIGN_LOW_LINE_ASTERISK : 
'~<_* ' ; TILDE_TILDE : '~~ ' ; TILDE_TILDE_GREATER_THAN_SIGN : '~~> ' ; TILDE_TILDE_GREATER_THAN_SIGNr : '~~>r ' ; TILDE_TILDE_GREATER_THAN_SIGNt : '~~>t ' ; TILDE_TILDE_GREATER_THAN_SIGNu : '~~>u ' ; TILDE_TILDE_GREATER_THAN_SIGNv : '~~>v ' ; TPLUS_SIGN : 't+ ' ; TX : 'tX ' ; T_FULL_STOP : 'T. ' ; T_r : 'tr ' ; Tail : 'tail ' ; Tan : 'tan ' ; Tanh : 'tanh ' ; Tar : 'tar ' ; Tarski : 'Tarski ' ; TarskiMap : 'tarskiMap ' ; Tayl : 'Tayl ' ; TermObj : 'TermObj ' ; Theta : 'theta ' ; ToCHil : 'toCHil ' ; ToHL : 'toHL ' ; ToInc : 'toInc ' ; ToMetSp : 'toMetSp ' ; ToNrmGrp : 'toNrmGrp ' ; ToPolyDIGIT_ONE : 'toPoly1 ' ; Tofld : 'Tofld ' ; Top : 'Top ' ; TopBases : 'TopBases ' ; TopDRing : 'TopDRing ' ; TopFld : 'TopFld ' ; TopGen : 'topGen ' ; TopGrp : 'TopGrp ' ; TopLnd : 'TopLnd ' ; TopMnd : 'TopMnd ' ; TopMod : 'TopMod ' ; TopOn : 'TopOn ' ; TopOpen : 'TopOpen ' ; TopRing : 'TopRing ' ; TopSep : 'TopSep ' ; TopSet : 'TopSet ' ; TopSp : 'TopSp ' ; TopSpOLD : 'TopSpOLD ' ; TopVec : 'TopVec ' ; TopX : 'topX ' ; Toset : 'Toset ' ; TosetRel : 'TosetRel ' ; TotBnd : 'TotBnd ' ; Tpos : 'tpos ' ; Tr : 'Tr ' ; TrL : 'trL ' ; TrPred : 'TrPred ' ; TrailOn : 'TrailOn ' ; Trails : 'Trails ' ; Trans : 'Trans ' ; TransportTo : 'TransportTo ' ; Trcng : 'trcng ' ; Triangle : 'triangle ' ; Trn : 'Trn ' ; Tsums : 'tsums ' ; UFL : 'UFL ' ; UFULL_STOP : 'u. ' ; UFil : 'UFil ' ; UMGrph : 'UMGrph ' ; USGrph : 'USGrph ' ; USLGrph : 'USLGrph ' ; U_FULL_STOP : 'U. 
' ; U_LOW_LINE : 'U_ ' ; Ub : 'ub ' ; Uncurry : 'uncurry ' ; UncurryF : 'uncurryF ' ; Undef : 'Undef ' ; UniOp : 'UniOp ' ; UnicDIGIT_ONEp : 'Unic1p ' ; Unif : 'Unif ' ; UnifSp : 'UnifSp ' ; Unit : 'Unit ' ; UnitVec : 'unitVec ' ; Univ : 'Univ ' ; UnivVertex : 'UnivVertex ' ; Until : 'until ' ; VA : 'vA ' ; VDeg : 'VDeg ' ; VERTICAL_LINE : '| ' ; VERTICAL_LINE_CIRCUMFLEX_ACCENT_VERTICAL_LINE : '|^| ' ; VERTICAL_LINE_CIRCUMFLEX_ACCENT_VERTICAL_LINE_LOW_LINE : '|^|_ ' ; VERTICAL_LINE_EQUALS_SIGN : '|= ' ; VERTICAL_LINE_GRAVE_ACCENT : '|` ' ; VERTICAL_LINE_GRAVE_ACCENTcat : '|`cat ' ; VERTICAL_LINE_GRAVE_ACCENTf : '|`f ' ; VERTICAL_LINE_GRAVE_ACCENTs : '|`s ' ; VERTICAL_LINE_GRAVE_ACCENTt : '|`t ' ; VERTICAL_LINE_HYPHEN_MINUS_GREATER_THAN_SIGN : '|-> ' ; VERTICAL_LINE_LOW_LINE : '|_ ' ; VERTICAL_LINE_VERTICAL_LINE : '|| ' ; VERTICAL_LINE_VERTICAL_LINEr : '||r ' ; VERTICAL_LINEg : '|g ' ; VH : 'vH ' ; VarDIGIT_ONE : 'var1 ' ; VarFGrp : 'varFGrp ' ; VarFMnd : 'varFMnd ' ; Vec : 'Vec ' ; Vol : 'vol ' ; VolASTERISK : 'vol* ' ; WAtoms : 'WAtoms ' ; WUni : 'WUni ' ; WUniCl : 'wUniCl ' ; WalkOn : 'WalkOn ' ; Walks : 'Walks ' ; We : 'We ' ; Wff : 'wff ' ; Word : 'Word ' ; Words : 'Words ' ; XFULL_STOP : 'x. ' ; XX_FULL_STOP : 'XX. ' ; X_FULL_STOP : 'X. ' ; X_LOW_LINE : 'X_ ' ; XcFULL_STOP : 'Xc. ' ; Xm : 'xm ' ; Xp : 'Xp ' ; XsFULL_STOP : 'Xs. ' ; XsLOW_LINE : 'Xs_ ' ; XtLOW_LINE : 'Xt_ ' ; Yon : 'Yon ' ; ZF : 'ZF ' ; ZMod : 'ZMod ' ; ZRHom : 'ZRHom ' ; ZRing : 'ZRing ' ; ZZ : 'ZZ ' ; ZZ_GREATER_THAN_SIGN_EQUALS_SIGN : 'ZZ>= ' ; Z_LEFT_SQUARE_BRACKETiRIGHT_SQUARE_BRACKET : 'Z[i] ' ; Z_SOLIDUSnZ : 'Z/nZ ' ; ZeroDiv : 'zeroDiv ' ; Zeta : 'zeta ' ; Zp : 'Zp ' ;
On Thu, Feb 21, 2019 at 12:48 AM Mario Carneiro <di....@gmail.com> wrote: Indeed, dealing with modularization seems pretty important and apparently caused problems for Raph, so perhaps I should give it more thought. In a specification file such as this, we don't have to worry about proof dependencies, but we do have to be able to import axiom systems and syntax from another file. But now I'm worried about the possible security vulnerabilities this causes. A specification file is supposed to be self contained in the sense that if you know the language then it says what it means. If a specification file imports another, then the meaning of the first file depends on the meaning of that other file as well. It's even worse if the path is ambiguous in some way, and we don't see exactly what it's referring to. If the includes are relative to a path provided at the command line, then it's not even definite what file is being referred to. One way to make sure the import is right is to reproduce the axioms and theorems that are expected to be imported. This way the verifier can double check that all the files are stitched together correctly. But that may well be the whole file or nearly so (theorems don't need to be imported, but axioms and definitions do). And that's a hell of a lot of boilerplate, and also obscures the actual content of the file if it starts with a lengthy recap section. The specification file / proof file dichotomy is supposed to represent the distinction between *what* is being proven and *how* it should be proven. The proof file is a fine place for putting in instructions for how to find other files, but the specification files should be more focused on content and cross checks. I will return to this topic in a bit with a more concrete syntax proposal. I was thinking more in terms of development than modularization / meaning. 
The "correct" file to attest meaning and validation of proofs is the solid, self contained file, which in turn could be hashed / signed, (and so, proof files need a syntax to permit their coexistence on a self contained file).
The `file` and `file.import` statements are only for the ease of development -- think the mega files of Metamath versus GitHub, and source control in general. In this context, `file` directives will be treated as comments, only utilized on splitting and joining files. Modularization comes last, as it is better treated in syntax / language itself. Maybe a Python-like `import` statement.
b) if it is easy to make things computer-understandable (build a tree), it might be easy (or not :/) to turn those things into human-friendly stuff (display, write and interact with it nicely) with another software layer that isn't in the language. c) writing things twice should be a big no no for any programmer and mathematician
d) even if you write things twice in your database, you'll still have to build a parser to handle human input anyway :)
> nonempty sort wff;
If I am not wrong, all sorts in your example are defined to be nonempty.
So why isn't the keyword "sort" set to directly mean a "non-empty sort",
instead of having to specify it? Or else, what is the use for a sort which
is not declared nonempty? [cosmetic]
> var ph ps ch: wff*;
I sort of understand what the * means. But why not introduce
another keyword to accompany "var" (maybe "open"?), instead of adding
the star, which is inhomogeneous with the rest of the syntax? [cosmetic]
> term wi (ph ps: wff): wff; infixr wi: -> 25;
I like this way to specify the syntax for "human-readable" expressions
(and I also like very much that idea of introducing a
more-human-readable expression syntax over Metamath altogether). I like
even more the proposal that was made to only specify a partial priority
order, basically for two reasons: first, because it removes the problem
of deciding how much "priority space" one has to leave between two
different operators; second, because it makes operators coming from two
different sources automatically not comparable, instead of automatically
having a random priority order depending on the specific numbers chosen
by the two sources.
I also understand that in this setting the use of parentheses "(" and
")" to force priority is hardcoded in the language and cannot be
changed, say, to "[" and "]" by a specific file. It is not necessarily
bad, it is just different from Metamath, where the choice of "(" and ")"
is completely arbitrary. [cosmetic]
Also, "infix" without the "r" is meant to be infix left? Or is there
also an "infixl"? In that case, what does "infix" mean? If "infix" is
already infix left, I would suggest to use "infixl" and leave out
"infix", for better symmetry. [cosmetic]
> term wn (ph: wff): wff; prefix wn: ~ 40;
>
> axiom ax-1: << ph -> ps -> ph >>
>
> (wi ph (wi ps ph));
I am among those that do not like these double expressions. I do not
like in general when the same file contains some data that is meant to be
hand-written and some data that is meant to be auto-generated. I would
definitely prefer that the verifier is required to parse mathematical
expressions and check them, so that the file is entirely hand-written.
The proposal to include a proof of the grammar unambiguity in the proof
file sounds reasonable to me.
The above comment calls for eliminating the need for chevrons, but if
they are to stay, then I would suggest replacing them with some other
syntax, in order to make less confusion with arrows and leave them
available for the math syntax (where they can mean "much greater than").
I would propose "$$", which seems to be less likely to be used in maths.
[cosmetic]
> theorem a1i: << ph >> ph -> << ps -> ph >> (wi ps ph);
You do not specify what is the proof format. It is ok for me to store it
in another file (actually, I find it positive, in that it goes in the
direction of separating hand-written and generated content) and I do not
need the details of the encoding, but at least I would like to know what
is the abstract format. In Metamath the abstract format is the
substitution, but I believe something has to change in Metamath Zero,
since the abstract expression representation has changed too (Metamath
deals partially with symbol strings and partially with their
parsing tree, although without guaranteeing that it is unique, while
Metamath Zero seems to be dealing with
parsing-trees-plus-bindings-information; is this correct?).
I like very much this syntax for definitions. I like the fact that the
soundness check is built into the language, and I like the idea that the
definition is "valid" only for a limited scope, and has to be used via
theorems afterwards. This, if correctly used, enables to separate the
axioms (or "interface") of a theory from its foundational construction
("implementation"), which is what in Metamath is done manually (and,
again, requiring a lot of external verification) for number sets.
Ideally this enables users to "rebase" theories on different
foundations, as long as the definition block does not let foundational
details escape.
I believe that the definition is somehow materialized as an additional
step available to the proof file for theorems appearing in the
definition block. Can you give details on this? Also, what is required
from the proof file to guarantee that the definition is sound?
def wb (ph ps: wff): wff := $ ~((ph -> ps) -> ~(ps -> ph)) $ {
infix wb: <-> 20;
theorem bi1: $ (ph <-> ps) -> ph -> ps $;
theorem bi2: $ (ph <-> ps) -> ps -> ph $;
theorem bi3: $ (ph -> ps) -> (ps -> ph) -> (ph <-> ps) $;
}
proof bi1: $ ~((ph -> ps) -> ~(ps -> ph)) -> ph -> ps $ := ...;
proof bi2: $ ~((ph -> ps) -> ~(ps -> ph)) -> ps -> ph $ := ...;
proof bi3: $ (ph -> ps) -> (ps -> ph) -> ~((ph -> ps) -> ~(ps -> ph)) $ := ...;
def wtru (bound p: wff): wff := $ p <-> p $ {
notation wtru := $ T. $;
theorem df-tru: $ T. <-> (ph <-> ph) $;
}
def weu (x: set) (ph: wff x) (bound y: set): wff := $ E. y A. x (ph <-> x = y) $ {
prefix weu: E! 30;
theorem df-eu (x y: set) (ph: wff x): $ E! x ph <-> E. y A. x (ph <-> x = y) $;
}
proof df-tru (ph ps: wff): $ (ps <-> ps) <-> (ph <-> ph) $ := ...;
proof df-eu (x y z: set) (ph: wff x):
$ E. z A. x (ph <-> x = z) <-> E. y A. x (ph <-> x = y) $ := ...;
> def wa (ph ps: wff): wff := << ~(ph -> ~ps) >> (wn (wi ph (wn ps))) {
>
> infix wa: /\ 20;
>
> theorem df-an: << (ph /\ ps) <-> ~(ph -> ~ps) >>
>
> (wi (wa ph ps) (wn (wi ph (wn ps))));
>
> }
>
>
> def wtru (bound p: wff): wff := << p <-> p >> (wb p p) {
>
> notation wtru := << T. >>;
I guess "notation" is another syntax constructor for the
"human-readable" expressions, isn't it? So it is on the same level with
"prefix", "infix" and "infixr", except that these latter are somewhat
more "smart".
> pure nonempty sort set;
What does "pure" mean?
> bound x y z w: set;
What does prevent the user to declare bound variables on sorts for which
this should not happen, like wff or class? Is this what "pure" is for? I
have the feeling I could have seen something related to this question in
the thread, but I cannot find it any more, sorry...
> def cv (a: set): class := << {x | wel x a} >>
> (cab x (wel x a)) {
I do not like very much the fact that you can also use "direct" term
constructors in "human-readable" expressions, instead of passing by some
construct defined with "notation", "infix" or whatever. Could not this
be avoided by extending a little bit the scope of the local notations
introduced a few lines above, and changing some of them so they do not
clash? It would also simplify the parser, probably, because there would
be less rules to consider and less interactions to check when proving
unambiguity. [semi-cosmetic]
> coercion cv: set -> class;
This basically means that a set can automatically be converted to a
class by introducing a cv constructor, without it being explicit in the
expression, right?
It may be that a more traditional parsing approach would be
fine for "metamath zero". Many parsing systems build up a parse table.
It shouldn't be too hard to experiment with building parse tables as you go.
I'd start with LALR, simply because that's well-known and relatively general,
but that'd be my starting guess.
You can also borrow a trick from ANTLR to make the tool more user-friendly:
directly convert left (or right recursion) if it's expressed within the rule itself.
First, let's review the problem. (Mario, I know you understand the
left/right recursion issue; I'm explaining some details so everyone else can follow along.)
As noted in many places like https://lambda.uta.edu/cse5317/notes/node21.html
In LALR(1) the opposite or LL(1) is true: left recursion is good, right recursion is bad.
E.g., this is typically a bad rule for LALR(1) parsers:
L ::= id , L
| id
"which captures a list of ids separated by commas. If the FOLLOW of L (or to be more precise, the expected lookaheads for L ::= id) contains the comma token, we are in big trouble. We can use instead:"
L ::= L , id
| id
LL(1) parsers have the opposite problem: You need to rewrite the other way, to use the
first rule form and not the second rule form.
What the ANTLR folks realized was that the common case (shown above) is really
easy to automatically rewrite just like a human would do it. If you do that, the tool
is MUCH easier to use correctly, yet the implementation code to do it is trivial.
You could still have special shorthand to add rules like your "possibility 2",
but they could just be shorthand for rules you *could* spell out directly.
The unambiguity proof is then trivial: Generate the parse table (as normal)
as you go, and if there's a conflict (e.g., shift/shift, shift/reduce, or reduce/reduce),
then the rule that created the ambiguity is in error.
In this case precedence rules might be a good thing - they'll eliminate many kinds
of structures that would otherwise be an ambiguity.
Il 22/02/19 09:50, Mario Carneiro ha scritto:
> So maybe the best solution is just to not have any file splitting
> primitives in this language at all. They get generated from some other
> language that has file splitting capabilities, and the result file is
> all stuck together.
I am not sure I have understood the discussion on modularization: I
believe that properly supporting modularization is important and that it
should be something more clever than C's #include. Something like
Python's import would be much better (I have myself been thinking for a
while on a different format proposal for Metamath; not seriously enough
to actually have a real thing, but Python-styled modularization was one
of the points I had in mind; also OpenTheory is built along the same
lines, if I remember correctly).
However, I would not be able to comment more in depth so far, so I will
wait for a more complete proposal by Mario. Except for one point: I
would like that for the sake of verifying the proof of a specific module
there would be no need to know the proofs for the modules the first
module depends on. But I believe this is already one of the firm points
by Mario.
> As for parsing solutions, I am down to two possibilities at the moment.
>
> 1. Specify grammar rules as a CFG, and check for unambiguity via a
> deterministic pushdown automaton in the proof file.
> 2. Restrict the kinds of notations to a small collection of
> possibilities, i.e. infix operators, binders, and precedence, in such a
> way that nondeterminism or ambiguity isn't possible.
I have already casted a tentative vote for number 1, but I am not as
sure any more. A problem I now see with 1 is that the automaton that
verifies a CFG depends globally on that grammar. This makes
modularization more difficult: introducing a new syntax in a
dependency requires recomputing the automata for all the modules that
use the dependency, even if they do not use the new syntax. Each time
you include a new module you need to recompute the automaton. This seems
to make practical usage of modules quite complicated. Proposal 2 does
not have this problem, it seems.
Regarding proposal number 2, it seems pretty likely that it wouldn't be
able to handle the grammar currently described in set.mm. However, I
would like to understand if we can support enough features so that a few
notations that are usual in mathematics do not get lost. For example, a
reasonable notation for "limit-type" operators (i.e., limits, integrals,
summations, ...) and for class abstractions. So for example we could add
the provision that derivations of the form "{" SOMETHING_INSIDE "}" are
allowed, provided that tokens "{" and "}" do not appear anywhere else in
the grammar. Or derivations like "integral_sign" EXPRESSION are allowed
with the additional rule that EXPRESSION is as short as possible. I am
not sure these rules actually preserve unambiguity, I am not really an
expert in parsing. It is just a little brainstorming.
> Dual to the "pure"
> modifier, we could have a "strict" modifier on sorts to say that there
> should be no variable sort generated for this sort in metamath, and this
> comes with an attendant restriction that you can't declare a binding
> operator like
>
> term all_class (A: class) (ph: wff A): wff;
>
> that would make the theory more like MK set theory.
I tend to think that I would like to have both "pure" and "strict", and
to defined set as pure and wff and class as strict. It seems to me that
the principle of restricting all that does not have a reason to be
allowed finds good application here.
> The idea is that if there is no notation for a term constructor, then
> this prefix form is still available. In particular, that means that you
> can always write s-expressions if you really want to, and for
> complicated term constructors (say, with 5+ arguments) this may be the
> cleanest way to actually write them. I don't think a fancy infix
> notation should be required for things where a simple prefix notation
> works fine.
I am not completely convinced. In a sense I would like to separate
completely the human-readable notation and the names used in the parsing
tree. However, I can perfectly live with it.
> I'm not exactly sure what to do in the case of overlapping notations,
> but my instinct is to disallow it entirely. In this case we have one
> binary operator "e." that is being repurposed to mean three different
> things throughout the file. You don't want the computer guessing which
> one you mean, and especially for theorems where you have to talk about
> two or more of them at the same time, like df-cv:
>
> theorem df-cv: $ a e. b <-> wel a b $;
>
> you should be forced to write the expressions in an unambiguous way.
>
> It is up to the person writing the specification file whether they want
> to have a bunch of nonoverlapping global notations, metamath style, or
> scope things more locally. The prefix notation is a backup when you
> don't have a notation for the constructor at the moment (or don't want
> to be bothered defining one).
As I understand it, for both signs "=" and "e." only the class-class
version is used except in the foundation part of the file, isn't it?
So what I would do is reserve the names "=" and "e." for that version, and
invent some other variations like "=_" or "e._" for the set versions,
and of course declare them locally to a context that exists only as long
as the foundations have to be set up. And I would only use those
syntaxes, not the explicit s-expression one.
Or some type inclusion and/or some automatic boxing/unboxing.
I only saw that on GitHub https://github.com/digama0/mm0/blob/master/mm0.md
there is a mention of "coercion
(nameless notation)", with the description:
- A coercion
between distinct sorts means that the
given syntax axiom will be silently inserted to convert from one sort to another.
Is this something new? Does this already exist in metamath somehow?
I don't know metamath enough, so excuse my naive question.
Some random thoughts: How about a compressed meta math,
that makes heavy use of an automated transfer principle, if
we can prove for a class C:
|- phi(C)
We should be also able to prove for a set x:
|- phi(x)
Doesn't provide modern computer science and proof assistant
technology something like union types? Couldn't this be used to
then have the following:
|- phi(t,s)
Where t can be either class or set, and s is a sort parameter.
Some trickery with dependent types.
* It seems that nothing is said about lexing: while for the "outer"
language this is probably not very controversial (I imagine the rules
for most programming languages are in force: tokens are either sequence
of alphanumeric characters, or sequence of symbols; whitespace is
irrelevant except that it always divides tokens; symbol tokens are
divided as soon as they are not a prefix of a valid symbol token), I
believe something is to be said about how to tokenize the "inner"
language, i.e., the one whose grammar is described by the "infix",
"notation", etc rules. My current understanding is that when the outer
lexer finds a dollar character, it keeps reading until it finds another
dollar character that is not immediately followed by another dollar
character (counting parity: if three dollars in a row are found, but not
four, then the first two collapse to a dollar in the formula and the
third one closes the formula, I suppose). Everything in between the two
dollars is the content of the formula, which is treated as a string for
the time being (and runs of two consecutive dollars are collapsed to a
single one, again counting parity). The outer lexer then resumes after
the second dollar token having produced a "formula" expression.
At some point there is another lexer that has to tokenize the string. What does
it do?
In Metamath this is fixed easily by saying that tokens are always
separated by spaces. Very easy to implement, but makes formulae not as
readable as possible. Looking at your example file, it seems that you
treat parentheses as always a single token, i.e., they do not need
whitespace around, which is very reasonable. It also seems that they are
the only token that gets such a special treatment.
This is also
reasonable, because the lexer remains rather simple and there are fewer
surprises for the user. With C one can skip a lot of spaces, because the
list of possible tokens is much more well-known (i.e., tokens are either
alphanumeric or symbols, they cannot mix; there is no way to introduce
new symbol tokens; both of these properties completely fail with
Metamath). It seems that in MM0 the only special treatment is for
parentheses, which have to be treated specially anyway. Again, all of
this is very reasonable to me, I just would like to be sure that I
understood it properly.
* The (non-associative) "infix" directive has disappeared and wb has
become infixl. Is this intentional? Your argument for not considering wb
associative seemed very convincing, in my opinion.
* I see you decided for numeric precedences, in the end. Is this due to
the additional complexity it would bring? I can live with it anyway, I
am just curious.
Thanks again for this work. I'll try to write a parser for the language
when I have some time, so that I can grab a more direct feeling of what
I still do not completely grasp of it.
I wonder if my asmc project[1] can be useful here: it is basically an
experiment in compiler and operating system bootstrapping, but it might
be that its foundations can be helpful for bootstrapping verifier
trustworthiness too.
[1] https://gitlab.com/giomasce/asmc
The bulk of the project is a extremely minimal "operating system"
(written in x86 Assembly; more or less 15 KiB once assembled, although
with some code golfing and disabling debugging pieces it could be made
smaller), whose basically only feature is a compiler for a language I
invented and called G. G is my personal take on how to design a language
which is as similar as possible to C, but such that it is very easy to
parse and compile. Two big things change from C: expressions are in
reverse Polish form and the type system is collapsed to a single type,
equivalent to C's int. I think that writing G is not difficult at all
for those who are used to C: you just think "in C" and apply a few
simple changes to the code you are writing down. There is an example of
G code in the repository's README (and, of course, in the repository
itself).
If I got correctly what you are looking for, this might be an ideal
environment for your project: you eliminate as a whole the dependencies
on "grown-up" compilers and on the underlying operating system; all that
remains is a few kilobytes of very simple x86 code implementing a simple
(non optimizing, which is probably a good thing in the context of easy
verifiability) compiler to a simple language to a simple subset of x86.
The runtime environment is as simple as it can be: 32 bit x86 protected
mode with flat addressing, no paging and disabled interrupts. In
particular no multitasking, which is known to complicate a lot code
verification.
Of course the dependency on the CPU and on the BIOS remains, and it is
not a little thing, but I believe that for the time being that is
basically unavoidable. There are projects around to build CPUs from
discrete components, which might be the further stage, but I believe
that it is very difficult to have enough computing power to verify the
Metamath database.
Also, you still would have to verify self-modifying code, but again I
guess that if you want to work with compilers there is not much to do.
Unless you want to directly implement the Metamath verifier in x86
(instead of in G), and then verify the whole x86 implementation. I don't
think it would be better.
What do you think of this? My idea was to write a Python implementation
of MM0, but maybe I can switch to G if the above sounds useful. Then a
proof of the G compiler and MM0 verifier should be written in MM0,
assuming that CPU and BIOS behave as specified (as I said, I fortunately
depend on a relatively little interface of both the CPU and the BIOS).
Hi,
Il 26/02/19 11:35, Mario Carneiro ha scritto:
> This sounds amazing! Indeed, minimizing dependencies on OS magic to
> the maximum degree possible is part of the goal here. I started
> looking into the literature and got bogged down a bit, so if you've
> already looked into it then that makes it a lot easier. I will need
> to study this language more carefully. Do you have a full
> specification, or is it just the description in the readme?
Good, I am very happy of that! By the way, I did not really perform any
extensive literature research before starting to work on asmc.
Admittedly, my original purpose was mostly learning low level
programming and basics of operating systems. At some point I discovered
and joined the Bootstrappable community[1], which is also engaged in
projects of this kind, although for the moment I am not aware of anyone
working on things as low level as mine. Ideally asmc should also connect
with other Boostrappable projects, as soon as I have managed to go high
level enough (or they managed to go low level enough).
[1] http://bootstrappable.org/
For the same reason, I do not have a full specification for G at the
moment. Of course feel free to ask questions, and at some point it will
be a good idea to refine the specification (and possibly fix bugs in the
compiler, although I am not currently aware of any).
> This sounds like a great idea. If you're interested I'll help however
> I can.
Good. I think I can begin coding a MM0 parser in G and, as soon as we
have a proof format suggestion, a proof verifier.
As soon as you gain
some confidence with G, which again I do not think to be difficult if
you already know C, you can definitely help. Even if I managed to write
a C parser in G, I do not feel really confident with all the details of
the dynamical parser we need for MM0, so your collaboration will be more
than welcome. The G compiler was not written with verification in mind,
so it might require some modifications. Hopefully not too much, and they
can arrive later. Also, at some point we will have to pinpoint what of
the x86 and PC/BIOS architecture is to be assumed, and check that
everything conforms to that.
Maybe as an immediate starting point I can make some boilerplate files
for hosting all of this in the asmc repository, so at least we know
where to start from.
Wow, I am very happy to be able to join asmc and Metamath, two things I
have been spending some time on in the last months/years!
Do you mind if I post a link of this conversation in the Boostrappable
community? I think they might be interested.
I have a (currenly paused) project based upon a different take: Instead
of verifying a compiler + assembler, I aim for a compiler which will
co-compile high-level proofs about C to low-level proofs about the
resulting binary.
> What do you think of this? My idea was to write a Python implementation
> of MM0, but maybe I can switch to G if the above sounds useful. Then a
> proof of the G compiler and MM0 verifier should be written in MM0,
> assuming that CPU and BIOS behave as specified (as I said, I fortunately
> depend on a relatively little interface of both the CPU and the BIOS).
My main worry is the theorem which states that the verifier will only
verify "true" statements, because this obviously depends on the axiom
system going into it. And naturally, any bugs already present in the
axioms can be carried forward into the "child" verifier.
> For the context, in the asmc repository there is already a C compiler
> written in G, which is less than perfect, but still able to compile a
> patched version of tinycc. The idea is to use tinycc in turn to compile
> a patched and stripped down version of Linux and eventually of GCC and
> the rest of a GNU operating system, so that the whole system is
> bootstrapped and compiled from the initial 15 KiB binary blob (and, as
> usual, the BIOS).
This brings in a whole lot of unchecked code, no?
So meta math somehow combines syntax issues with
logic. Quite unusual, that the syntax is not abstracted
away before hand. So it can extend its own syntax,
and not only the logic.
But then there was the ((f paradox
or something, if I remember well, so some syntax hygiene
would be nevertheless quite useful. Could be challenging!
On Wednesday, March 13, 2019 at 8:24:59 PM UTC+1, j4n bur53 wrote:My current take would be, that similar like overloading
of "in" and "=" by classes, that it would effectively be:
((R A) B)
((F A) B)
With usually juxtaposition from Lambdacalculus
application. But overloading the application somehow.
I have no clue how the infix reordering should
happen, during parsing/unparsing, since R and
F would not be some known constant with some
infix declaration. Although could allow local operators
declarations for variables as a novelty. So that
where a variable is used, it can use something else
than the default. Maybe the default would also
include R(A,B) and F(A,B), so that no definitions
for it would be necessary. I am perplexed!
On Wednesday, March 13, 2019 at 8:11:14 PM UTC+1, j4n bur53 wrote:How will metamath zero deal with:
wff A R B
http://metamath.tirix.org/wbr.html
class ( A F B )
http://metamath.tirix.org/co.html
I see a lot of problems, invisible function symbols, parenthesis
that are usually irrelevant for typing.
--
Nice!
So instead of "$= .... op $.", it is (op ....), right?
Will the system be "single sequent" or something along "multi sequent".
Just by accident I claimed that the metamath verification steps $= $. are
FORTH. But in FORTH there are operations such as:
dup(<x,x1,..,xn>) = <x,x,x1,..,xn>
https://en.wikipedia.org/wiki/Forth_%28programming_language%29
So I wonder whether metamath or metamath zero has ever thought
about some such extension.
I see a problem, that it is currently
not possible since assertions are named:
${
mp.1 $e |- ( A -> B )
mp.2 $e |- A
mp $a |- B
$}
But maybe if a group would be named, this would qualify
as a multi sequence inference rule, which are already
known from Gentzens work and also found in natural deduction:
and-elim ${
$e |- ( A /\ B )
$a |- A
$a |- B
$}
Just curious.
Possibly also difficult for systems that use formulas-
as-types, since formulas-as-types was primarily conceived
for "single sequent". Can Lean Prover do multi-sequents?
Bye
Disclaimer:
and-elim ${
$e |- ( A /\ B )
$a |- A
$a |- B
$}
Is not typical multi-sequent, what would be multi-sequent would be:
or-elim ${
$e |- G, ( A \/ B )
$a |- G, A , B
$}
Where comma is used to separate multiple formulas in the
RHS of |-. And G can match multiple formulas. So there would
be extra types maybe.
End of Excursion
On Sunday, March 24, 2019 at 12:07:17 PM UTC+1, j4n bur53 wrote:The example and-elim lacks LHS of a sequent, but
I guess the "deduction form trick" could be also applied.
And the multi-RHS is not inside the sequent but
in the verification languages itself, if it were FORTH.
Maybe too complicated without any benefit. So metamath
might be qualified as FORTH without stack operations?
On Sunday, March 24, 2019 at 12:03:31 PM UTC+1, j4n bur53 wrote:
Nice!
So instead of "$= .... op $.", it is (op ....), right?
Will the system be "single sequent" or something along "multi sequent".
Just by accident I claimed that the metamath verification steps $= $. are
FORTH. But in FORTH there are operations such as:
dup(<x,x1,..,xn>) = <x,x,x1,..,xn>
https://en.wikipedia.org/wiki/Forth_%28programming_language%29
So I wonder whether metamath or metamath zero has ever thought
about some such extension. I see a problem, that it is currently
not possible since assertions are named:
${
mp.1 $e |- ( A -> B )
mp.2 $e |- A
mp $a |- B
$}
But maybe if a group would be named, this would qualify
as a multi sequence inference rule, which are already
known from Gentzens work and also found in natural deduction:
and-elim ${
$e |- ( A /\ B )
$a |- A
$a |- B
$}
Just curious. Possibly also difficult for systems that use formulas-
as-types, since formulas-as-types was primarily conceived
for "single sequent". Can Lean Prover do multi-sequents?
Bye
Wont you need higher order unification sooner or later
for your forays? If you start using expressions such as
> * Definitions are checked by a slightly different process.
> term wal {x: set} (ph: wff x): wff;
> A term might have a type dependency, such as
If I am not totally wrong, if I remember well Isabelle has
some higher order unification somewhere in its "pure" bowels.
So far I was impressed by meta math, that it doesn't need
higher order unification, its rather some variables constraints
but not really a notion of higher order unification.
P.S.: BTW: Isabelle People having a Look at Lean:
Coinduction, using Impredicativity of Prop
https://gist.github.com/gebner/66bab9997d4b37fdb0884f7bcab95418#file-coindpred-lean
I hope it's a tame impredicativity available in Lean...
I think Isabelle/HOL uses some form of higher order unification,
according to these slides:
http://www21.in.tum.de/~ballarin/belgrade08-tut/session03/session03.pdf
"perform induction by guessing the induction hypothesis.", yes could
be such a situation. But its already helpful for implementing
the very subject logic itself. For example (exists intro, see also slides):
G |- A(t)
-----------------------
G |- exists x A(x)
When you do backward proof search, Isabelle/HOL can provide
you with new schematic variable aka holes, correctly constrained.
Maybe Lean can also show such holes. Dunno
On Fri, May 3, 2019 at 5:19 AM Giovanni Mascellani <g.masc...@gmail.com> wrote:I brushed off my project
of writing an independent verifier in G. In preparation for that I began
writing a Python implementation, to understand the specification and to
use it as a model for the G implementation. I'm working on a clone of
your repository which is here:
https://github.com/giomasce/mm0/blob/master/mm0-py/parser.py
Great! The more languages the better.
Hi,
Il 03/05/19 12:37, Mario Carneiro ha scritto:
> Also, on the subject of spec changes, "var" and { scopes } are
> deprecated. The formal specification doesn't deal with them at all, and
> I wasn't planning on implementing them in future verifiers (the Haskell
> verifier will support a few eliminable extensions to the language for
> development convenience; "include" is on the todo list).
Does this mean that variable inference as a whole will be removed?
Because I think it does not make sense to have variable inference
without "var", does it?
Also (but this is probably minor), specs say that "Bindings come in
three groups: the (nondependent) bound variables, the regular variable
bindings, and the formulas. It is an error for the explicit bindings to
come out of order". However, examples/set.mm0 contains
axiom ax_inf {x: set} (a: set) {y z: set}:
$ E. x (a es. x /\ A. y (y es. x -> E. z (y es. z /\ z es. x))) $;
which seems to violate that requirement (there are bound variables
after regular variables). The set.mm0 generated from set.mm does not
have this problem.
I'm really excited about where this is all headed. Finally an IDE I'm not ashamed of. I have a while to go before I can start redirecting these efforts toward metamath proper, but I think one application is to be able to use this as a metamath shim.
Hi Mario,
Like many I'm following your developments with great interest,
but I have not started playing with it.
I've been enjoying a lot developing proofs in MM using MMJ2. What
would be the future of that?
Do you expect that the tools in MM1 will be so advanced that people will prefer using MM1 instead of MM, or do you expect developments on set.mm (and iset.mm) to continue in the MM format?
Will it be possible to have developments made in MM1 flow back
into set.mm?
In short, what would be the different development pipelines?
Thanks,
BR,
_
Thierry
I've been enjoying a lot developing proofs in MM using MMJ2. What would be the future of that?
Will it be possible to have developments made in MM1 flow back into set.mm?
In short, what would be the different development pipelines?
- [MMJ2] MM -> MM0 -> publication in HOL/lean?
- [mm0-hs] MM1 -> MM0 -> MM?
I've tried it and it seems promising indeed. But the whole interface isn't looking very interactive right now. The only feedback that I can get is editor's pop-up while hovering mouse over a problematic token.
And autocomplete isn't very helpful either yet.
Of course, the project is in a very early stage and so this is expected. Are you planning an improved interface for the future and what are limitations of VSCode extension?
Also Plan9 interface comes to mind where any text is in fact a widget.
I can in principle do anything listed here:Let me know if you see anything that would fit well with the proof authoring experience.
Another option which Lean uses but doesn't fall under any standard vscode feature is to have a "goal view" which pops up on the right side of the screen and basically shows you the result of (stat) continuously. To do this I would have to extend the vscode extension, which I am loath to do because it decreases the portability of the extension. Currently all the work is being done from mm0-hs as a backend server and communication goes via the language server protocol, which means that it should be easy to set up a similarly featureful editor mode for any other LSP supporting editor, such as emacs.
Also Plan9 interface comes to mind where any text is in fact a widget.Could you elaborate on this? I haven't used Plan9, what's the interaction story there?
On Monday, July 22, 2019 at 1:04:34 AM UTC+3, Mario Carneiro wrote:I can in principle do anything listed here:Let me know if you see anything that would fit well with the proof authoring experience.It seems that text formatting could be useful to provide some visual feedback.
Another option which Lean uses but doesn't fall under any standard vscode feature is to have a "goal view" which pops up on the right side of the screen and basically shows you the result of (stat) continuously. To do this I would have to extend the vscode extension, which I am loath to do because it decreases the portability of the extension. Currently all the work is being done from mm0-hs as a backend server and communication goes via the language server protocol, which means that it should be easy to set up a similarly featureful editor mode for any other LSP supporting editor, such as emacs.Sure, that's reasonable. I think that using VSCode's webview should also be ruled out as non-portable. Though, I can't help but think about elisp integration.
Also Plan9 interface comes to mind where any text is in fact a widget.Could you elaborate on this? I haven't used Plan9, what's the interaction story there?In Plan 9 everything is a server, so its text editor Acme is a server which can interpret ranges of text as a message. So, you could just type something like "Open" in one of its buffers and use special mouse click to send it as a message to Acme server which opens a file. In fact the idea comes from Oberon and is described here. But I don't know if that's possible with LSP.
What do you mean? Are you talking about an MM1 code formatter (i.e. fixing spacing and indentation)?
Okay, after this and some research I have a sense of what this looks like. But how would you use something like this in MM1? What kind of interactions are you considering?
This is not always a valid move, for instance if the lisp program also has some side effects like adding new definitions and so on, but I imagine that many tactics will fit this mode of operation, and as long as the user is in the driver's seat here they can choose to use it only when it is applicable.
Something that comes close to this is the "code action" feature. If you type the right text (something that the LSP server decides deserves a code action), vscode will pop up a lightbulb next to the text, and if you click on it you can perform some set of actions. Usually this is used in programming languages for doing refactoring operations: you click on a variable name, the lightbulb pops up, you find "export to method" or similar in the dropdown and click on it, and the server updates the content of the file with the result of the refactoring.
Writing tactic for a specific theorem seems too cumbersome if I could just do it directly.
In ideal world one should simply point to one of the options and apply it to the theorem in question to get a feedback, then either proceed to another iteration or undo and return to a previous state. Also, visual clues are very important. I can't see too much if my workspace is cluttered with irrelevant information. In fact I might want to concentrate on different parts at different times, e.g. I can select different premises and conclusions to apply deduction rules or unification. I've tried this in mmj2 and found that there is not much feedback, but unification and derivation features were most useful.
This is not always a valid move, for instance if the lisp program also has some side effects like adding new definitions and so on, but I imagine that many tactics will fit this mode of operation, and as long as the user is in the driver's seat here they can choose to use it only when it is applicable.Exactly. I find it inconvenient when the system is doing something unexpected and I want to examine it first and then make a decision myself.
On Mon, Jul 22, 2019 at 7:13 AM vvs <vvs...@gmail.com> wrote:Writing tactic for a specific theorem seems too cumbersome if I could just do it directly.That depends on the tactic, and the theorem. The point of tactics is that they automate proof writing, and the reason the tactic language is commingled with the definitions and proofs is because some tactics are domain specific.
In MM1 the primary interaction mode is just using a "refine" proof (automatically called if you give a quoted lisp expression as the proof, like all the current proofs in peano.mm1). Here's a real interaction session I just had with MM1 for proving the equivalence relation theorems for class equality:theorem eqsid: $ A == A $ =
'_;
^ |- A == A
'(ax_gen _);
^ |- ?a e. A <-> ?a e. A
'(!! ax_gen x _);
^ |- x e. A <-> x e. A
'(!! ax_gen x biid);
I would argue that the number of characters I have to type here is about the same as you would get with an mmj2 proof, but because the computer isn't constantly rewriting the text area I have more control and don't have to move the cursor all around the place.
Exactly. I find it inconvenient when the system is doing something unexpected and I want to examine it first and then make a decision myself.This is also an advantage of having a complete record of your interaction on the page so you can edit it as desired.
On Tuesday, July 23, 2019 at 3:00:55 AM UTC+3, Mario Carneiro wrote:On Mon, Jul 22, 2019 at 7:13 AM vvs <vvs...@gmail.com> wrote:Writing tactic for a specific theorem seems too cumbersome if I could just do it directly.That depends on the tactic, and the theorem. The point of tactics is that they automate proof writing, and the reason the tactic language is commingled with the definitions and proofs is because some tactics are domain specific.I suspect that my reasoning is too vague. I didn't argue against tactics per se and I just want to point out that they are redundant in case when I can just choose from several ready alternatives. Also reasoning about visible alternatives is much easier than about results of code execution.
In MM1 the primary interaction mode is just using a "refine" proof (automatically called if you give a quoted lisp expression as the proof, like all the current proofs in peano.mm1). Here's a real interaction session I just had with MM1 for proving the equivalence relation theorems for class equality:theorem eqsid: $ A == A $ =
'_;
^ |- A == A
'(ax_gen _);
^ |- ?a e. A <-> ?a e. A
'(!! ax_gen x _);
^ |- x e. A <-> x e. A
'(!! ax_gen x biid);And that's just fine as far as I'm concerned. The problem which bothers me in the first place is that there is no feedback on available alternatives. If I don't know the names of required theorems in advance then I don't have a tool to find them.
And what I meant was that some form of a context menu would be nice so it was possible to click on the link and get a list of theorems suitable for unification with online docs as well. That's what CodeLens is good for.
I would argue that the number of characters I have to type here is about the same as you would get with an mmj2 proof, but because the computer isn't constantly rewriting the text area I have more control and don't have to move the cursor all around the place.And that's good. But the point is that I have no control over unification and should know all the theorems in advance. In mmj2 I can get an interactive list of candidates. Also I don't have to put all the hypotheses there myself as they would be inferred if I chose so.
Exactly. I find it inconvenient when the system is doing something unexpected and I want to examine it first and then make a decision myself.This is also an advantage of having a complete record of your interaction on the page so you can edit it as desired.Right now I just have a "command line prompt" before me (well sort of) and I'm stumbled on what my choices actually are. Your examples at least show how it supposed to work but there is not enough interactive hints yet.
I hope that these are just temporary restrictions and you didn't take this discussion as a sign of discouragement. I really like how it goes and in what direction.
In the right hands you can go very far with automation style proofs. Unfortunately I don't think those hands are mine, since I seem to have inherited a DIY attitude from metamath and don't make much essential use of tactics even when they are available.
(By the way, did you notice the automatic definition unfolding? Sweet!)
On Tuesday, July 23, 2019 at 3:00:55 AM UTC+3, Mario Carneiro wrote:
(By the way, did you notice the automatic definition unfolding? Sweet!)
What definition and where? MM1 or lisp? Can you give an example?
BTW, there are a few other things that seem inconsistent to me. Some variables have no hovers in math block, e.g. ps in ax_3.
Also, variable names are not unified in hovers, so there could be ph->ps->ch in one place and b in another, e.g. in a1i. That means more mental calculation steps for user and is different from mmj2.
Another cosmetic problem: there is a MM0 traceback every time when some vscode settings are changed "haskell-lsp:no handler for. Object (fromList [("params",Object (fromList [("value",String "off")])),("method",String "$/setTraceNotification"),("jsonrpc",String "2.0")])".
there would need to be some sort of API that would at least let you (1) find current theorem state, and (2) allow you to apply/undo those options (and see the new state). Being able to find out "possible options" would be nice too.
So that means that almost all definition unfolding is implicit, although you will be able to unfold explicitly too using a "change" tactic that says "this is my goal now, make sure it's the same as the current goal up to unfolding definitions".
This should go a long way to recovering a bit of the readability of mmj2 worksheets. (Another alternative for getting this style is a lisp command like (explode 'a1d) that will display the proof in an mmj2 like style, with steps and hypotheses and statements on every step.)
On Wednesday, July 24, 2019 at 3:08:04 AM UTC+3, Mario Carneiro wrote:So that means that almost all definition unfolding is implicit, although you will be able to unfold explicitly too using a "change" tactic that says "this is my goal now, make sure it's the same as the current goal up to unfolding definitions".Ah, so more automation just like applying ax_mp. That's what tactics are good for. I can imagine that some tactics could be written that will enable natural deduction or even sequent calculus proofs, though I don't know what should be done about user interface.
This should go a long way to recovering a bit of the readability of mmj2 worksheets. (Another alternative for getting this style is a lisp command like (explode 'a1d) that will display the proof in an mmj2 like style, with steps and hypotheses and statements on every step.)I bet that in the end you will get almost as many lisp tactics as there are theorems and that means the same problem with finding their names. I think that UI should provide a good solution to explore everything in the library which is a big limitation in mmj2.
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/metamath/E1hqOri-00043j-TG%40rmmprod07.runbox.
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/metamath/E1hqjy4-0006Ya-J7%40rmmprod07.runbox.
On Wed, 24 Jul 2019 23:44:05 -0400, Mario Carneiro <di....@gmail.com> wrote:
> * I personally don't find f(e1 e2) to be a major improvement of notation
> over (f e1 e2). I haven't used a language where function calls are written
> as f(x) since I stopped writing java... it makes a difference for a language for
> which I am the primary user
The f(...) function call syntax is really widely used in programming languages
(e.g., C, C++, Java, C#, JavaScript, Python, Ruby, PHP, Perl, Rust, Fortran, Ada).
That makes it familiar, and familiarity helps.
I imagine you want to eventually be one of many.
MM0 is coming from the tradition of another language family: Lisp, ML, Haskell, Elm, Idris, Coq, Agda, Lean. These language communities are inspired by Church's lambda calculus model of stateless computation, leading to functional programming, as distinct from the Von Neumann style of "model the machine as a finite state machine with an addressable memory" that lead to BCPL, B, and C, later augmented with object oriented style in C++ and Java. Most of the rest are some combination of these, without any firm position (Ada and Rust are exceptions).
After spending years in formal theorem proving, looking back at programming languages with an eye for correctness, they are to my eyes almost all abhorrent monstrosities which have long since diverged from anything approaching a formal semantics. C and C++ are trying valiantly to write it all down, and as a result we can look at the thousands of pages of standardese to see quite plainly how monstrous they are (C++ of course being a much worse offender). Lisp was the first major language to say "you can have a simple and correct programming language that is also practical", and I want to get back to that ideal. Ultimately, the goal is not MM0 - it is whatever language compilers we can build from verified components in MM0.
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/metamath/415c7823-f62d-601b-7a60-a157eeb12f6f%40gmail.com.
Hi,
Il 19/05/19 10:31, Mario Carneiro ha scritto:
> I've also started working on mm0-c and the binary file format. I don't
> have anything specific to share yet, but one interesting feature is that
> the format will serve as read-only memory for the verifier itself, so
> that I think I can achieve verification with O(1) read/write memory (no
> stack! no heap!) and linear time checking (not exactly streaming, but
> with limited random access) so that it really can run in low memory
> contexts. The basic idea is to represent the stack as a linked list and
> embed it in the proof stream itself. Because the stack changes over
> time, it actually appears as a forest instead of a linked list. Each
> proof step contains these stack backreferences in addition to a possible
> heap backreference for actual "Z" steps.
Are there news about the binary format and the C verifier?
I tried it today, but I can make little sense of what it does. In particular, it
gives no feedback and it doesn't seem to read the MM0 file at all.
Sorry, I must have missed this the first time round.
So proofs that are notorious resource hogs like the Four Color Theorem and the Kepler Conjecture would "just" be very very large files? That's exciting!
But I suppose at that scale streaming becomes important because ideally you'd want an (untrusted) program generating the proof and a verifier checking it, without ever having to store a proof as large as Flyspeck on disk?
The proofs would already be very large files, wouldn't they (except that
nobody has coded them in Metamath yet, to my knowledge)?
I think the point made by Mario is that it would be nice to have a verifier running
in constant RAM space and taking a streaming input, which is something
that does not happen currently with Metamath (because the verifier has
to store all defined assertions and symbols to verify subsequent
theorems). I guess Mario's idea is that the readonly input already has
all the information about the defined assertions and symbols, so the
verifier just has to check them and then it can read them again. This
last bit is probably the part that makes it non-streaming, and I wonder
if there is a way around.
> But I suppose at that scale streaming becomes important because ideally
> you'd want an (untrusted) program generating the proof and a verifier
> checking it, without ever having to store a proof as large as Flyspeck
> on disk?
The wild dream here (or, at least, my wild dream), if a real streaming
verification can be done, is to implement it within the CPU cache at
computer boot time, so that it can be run even before the BIOS
initializes the dynamic RAM (so you don't have to depend on the
untrusted BIOS code). Of course this thing has a lot of technical
difficulties, including finding a way to actually stream the database in
and the result out (maybe using a serial port or GPIO, which might be
reasonably easy to initialize even without running the BIOS). Take it as
a dream, nothing more.
Incidentally, one of the results of the verification might be the
generation of code to start initializing the machine. This would
implement trusted execution of verified code from the firmware itself.
Even more of a dream.
What is reasonably attainable in the short term, and the reason why I
was asking Mario for updates on his C verifier, is to do verification at
kernel time (i.e., after BIOS has run, but before an operating system is
loaded). I already have a good part of what is needed for this, but need
to understand the C verifier before.
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/metamath/CAFXXJSuHDKrgBRNwJpdo_2ste0rbQNaZKf9c0DrBO9pa11Prtw%40mail.gmail.com.
Dear Mario,
Hales talks about
> more than 23,000 inequalities. The verification of all nonlinear inequalities in HOLLight on the Microsoft Azure cloud took approximately 5000 processor-hours.
(https://arxiv.org/abs/1501.02155)
This must be in your
> proof trace of Flyspeck that is 159 MB after gzip
because you say it's the Isabelle component that you don't have. You can see from 5000 processor-hours why I was expecting a minimum of terabytes. So HOLLight must be just very high-level and therefore relatively slow to execute then I guess. That's encouraging: no need for streaming as I'd assumed, naturally you want to write it to disc as you say.