/**
 * Reduces a multiplication statement `( a x. b )`, recording in the engine the
 * proof steps justifying `|- statement = reduced(statement)`.
 *
 * NOTE(review): `eq`, `map`, `useTheorem` and `asProvenEquality` are project DSL
 * members whose proof-recording side effects are assumed here — confirm against Engine.
 *
 * @param statement a statement whose top-level operator must be multiplication
 * @return the reduced statement (or [statement] itself when no rule applies)
 * @throws Exception when the top-level operator is not `MM_MULTIPLICATION`
 */
fun Engine.reduceMultiplication(statement: Statement): Statement {
    val (a, op, b) = statement.children
    // Kotlin idiom: `op != x` compiles to `!(op == x)`, i.e. a negated equals() call — behavior unchanged.
    if (op != MM_MULTIPLICATION) throw Exception("wrong operation : $op")
    return when {
        // Reduce nested multiplications first, then re-reduce the rewritten statement.
        a.matchComplexMultiplication() -> statement.map(reduceMultiplication(a)).eq(Engine::reduceMultiplication)
        b.matchComplexMultiplication() -> statement.map(reduceMultiplication(b)).eq(Engine::reduceMultiplication)
        // Absorbing element: 0 x. b = 0 and a x. 0 = 0.
        a.isZero() -> statement.eq(laMul02i, b) { statement0 }
        b.isZero() -> statement.eq(laMul01i, a) { statement0 }
        // Identity element: 1 x. b = b and a x. 1 = a.
        a.isOne() -> statement.eq(laMulid2i, b) { b }
        b.isOne() -> statement.eq(laMulid1i, a) { a }
        // Single-digit times single-digit: delegate to the digit multiplication table.
        a.isDigit() && b.isDigit() -> statement.eq { multiplies2digits(a, b, statement) }
        // digit x. decimal: commute so the decimal number is on the left, then retry.
        a.isDigit() && b.hasDecimalConstructor() -> useTheorem(laMulcomi, a, b).asProvenEquality().eq(Engine::reduceMultiplication)
        // decimal x. decimal: split b into (b1 ; b2) and distribute a over both parts.
        a.hasDecimalConstructor() && b.hasDecimalConstructor() -> {
            val (b1, b2) = b.children
            /** b2 is supposed to be a digit */
            if (!b2.isDigit()) return statement
            val sA = "( ∨1c x. ∨2c )".with(a, b1)
            val sB = "( ∨1c x. ∨2c )".with(a, b2)
            useTheorem(neoremDec2Mul, a, b, "; ∨1c ∨2c".with(sA, statement0), sB, b1, b2).asProvenEquality()
                .map(reduceMultiplication(sA), reduceMultiplication(sB))
                .eq(Engine::reduceAddition)
        }
        // decimal x. digit: schoolbook step — multiply the low digit, propagate any carry upward.
        a.hasDecimalConstructor() && b.isDigit() -> {
            val (a1, a2) = a.children
            /** a2 is supposed to be a digit */
            if (!a2.isDigit()) throw Exception("$a2 is not a digit")
            val a2TimesB = multiplies2digits(a2, b)
            if (!a2TimesB.hasDecimalConstructor()) {
                // No carry: only the upper partial product still needs reduction.
                val upperResult = "( ∨1c x. ∨2c )".with(a1, b)
                useTheorem(neoremDecMul, a, b, upperResult, a2TimesB, a1, a2).asProvenEquality()
                    .map(reduceMultiplication(upperResult))
            } else {
                // Carry case: add the carry digit sA to the upper partial product before recombining.
                val (sA, sB) = a2TimesB.children
                val s1 = "( ∨1c x. ∨2c )".with(a1, b)
                val upperResult = "( ∨1c + ∨2c )".with(s1, sA)
                useTheorem(neoremDecMulCarry, a, b, upperResult, sB, a1, a2, sA).asProvenEquality()
                    .map(upperResult.map(reduceMultiplication(s1)).eq(Engine::reduceAddition))
            }
        }
        // Anything else (variables, non-numeric forms) is left untouched.
        else -> statement
    }
}
/**
 * Multiplies two single-digit statements [a] and [b] and returns the resulting statement.
 *
 * NOTE(review): the `useTheorem(...)` calls whose return values are discarded appear to
 * record proof steps in the engine as a side effect, so their order matters — confirm
 * against Engine before restructuring.
 *
 * @param aTimesBOrNull optional pre-built statement for `( a x. b )`; rebuilt here when null
 */
private fun Engine.multiplies2digits(a: Statement, b: Statement, aTimesBOrNull: Statement? = null): Statement = when {
    // 0 and 1 have dedicated absorption/identity theorems.
    a.isZero() -> useTheorem(laMul02i, b).asProvenEquality()
    b.isZero() -> useTheorem(laMul01i, a).asProvenEquality()
    a.isOne() -> useTheorem(laMulid2i, b).asProvenEquality()
    b.isOne() -> useTheorem(laMulid1i, a).asProvenEquality()
    else -> {
        // handle other digits
        val atimesB = aTimesBOrNull ?: "( ∨1c x. ∨2c )".with(a, b)
        val digitA = a.asInt()
        val digitB = b.asInt()
        val intResult = digitA * digitB
        // multiplies digits (result discarded: records the laMul(digitA, digitB) theorem in the engine)
        useTheorem(laMul(digitA, digitB))
        // build result — presumably the laMul theorem table only covers one argument order, hence the swap. TODO confirm
        val swap = digitA < digitB
        val a1TimesB1 = if (swap) "( ∨1c x. ∨2c )".with(b, a) else atimesB
        // Single digit stays as-is; two-digit products use the decimal constructor "; hi lo".
        val result = if (intResult < 10) intResult.toString().toStatement() else "; ${intResult / 10} ${intResult % 10}".toStatement()
        // handle 10 (written "; 1 0"): bridge through the laDec10 decimal-representation theorem
        if (intResult == 10) useTheorem(laEqtri, a1TimesB1, result, useTheorem(laDec10).asProvenEqualityReversed()/*statement10*/)
        // handle commutativity: derive atimesB = result from the swapped product via transitivity
        if (swap) {
            useTheorem(laMulcomi, a, b)
            useTheorem(laEqtri, atimesB, result, a1TimesB1)
        }
        result
    }
}
assertEquals("; ; ; ; ; ; 1 0 5 4 4 4 3".toStatement(), engine.reduce("( ; 1 3 x. ( ; ; 1 2 1 + ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + 7 ) ) ) )".toStatement()))
assertEquals("( ( 1 + ( ∨1c x. 2 ) ) + ( ( ∨1c x. ∨1c ) x. 3 ) )".toStatement(), engine.reduce("( ( 1 + ( ∨1c x. ( 1 + 1 ) ) ) + ( ( ∨1c x. ∨1c ) x. ( 1 + ( 1 + 1 ) ) ) )".toStatement()))
assertEquals("; ; ; ; ; ; 1 0 5 1 2 9 7".toStatement(), engine.reduce("( ; 1 3 x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + 7 ) ) - ; ; 1 2 1 ) )".toStatement()))
assertEquals("; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; 8 3 2 0 9 8 7 1 1 2 7 4 1 3 9 0 1 4 4 2 7 6 3 4 1 1 8 3 2 2 3 3 6 4 3 8 0 7 5 4 1 7 2 6 0 6 3 6 1 2 4 5 9 5 2 4 4 9 2 7 7 6 9 6 4 0 9 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0".toStatement(), engine.reduceFactorial("( ! ` ; 6 0 )".toStatement()))
t=t.logTime()//18096 ms
This looks impressive. Not having studied the code, I'm somewhat uncertain about what is going on "in between". Are you generating native metamath proofs for these statements? If so, how big are the proofs? Or how big would you estimate them to be?
From the point of view of archiving the results (in say set.mm), what matters is the size of the proofs and possibly (if they are extremely large) the run time of verifying them with a metamath proof verifier. The time needed to generate the proofs is somewhat irrelevant, since it's a one-time activity that is done once and never needed again. Of course, tools to help generate proofs more efficiently are welcome.
Obviously metamath is not well-suited to do routine arithmetic on large numbers efficiently, which has more to do with the nature of complete formal proofs from axioms than with metamath per se. In principle we can call an external "oracle" (calculator) that returns the result of a large calculation, then insert it into the metamath database as a new axiom, but philosophically that is a little ugly and would be a last resort.
An interesting challenge is to figure out how to prove that the results of certain long calculations hold without doing the calculations in the proof itself. A trivial example would be proving that 300! is an even number without expanding it. A more sophisticated example is using a certificate to show a large number is prime. Calculating the certificate can done once outside of metamath (and in principle we don't care how long the calculation takes), but proving the validity of the certificate is a relatively short proof. I think Mario was on the verge of doing this for some primes needed to prove Bertrand's postulate, but in the end direct proofs won out because the size of the numbers wasn't quite large enough to justify the overhead of using a certificate (Mario may wish to comment or correct me).
I have implemented subtraction (I chose option 1: using help from binary computations) for positive integers — assertEquals("; ; ; ; ; ; 1 0 5 1 2 9 7".toStatement(), engine.reduce("( ; 1 3 x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + 7 ) ) - ; ; 1 2 1 ) )".toStatement())) — and factorials yesterday. But I hit a big hurdle (performance): computing 30 ! takes 1 s, but computing 60 ! takes 18 s :/
assertEquals("; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; 8 3 2 0 9 8 7 1 1 2 7 4 1 3 9 0 1 4 4 2 7 6 3 4 1 1 8 3 2 2 3 3 6 4 3 8 0 7 5 4 1 7 2 6 0 6 3 6 1 2 4 5 9 5 2 4 4 9 2 7 7 6 9 6 4 0 9 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0".toStatement(), engine.reduceFactorial("( ! ` ; 6 0 )".toStatement()))
t=t.logTime() // 18096 ms. I had previously foolishly tried to compute 300 ! and I had to reboot my computer, which had frozen. Though I am using some tail-recursion optimizations, and doing it this way:
n! = (n-1)! * n = ((n-2)! * (n-1)) * n = (n-2)! * reduce((n-1)*n) = ...My guess is that there are still a lot of single digits multiplications and slow additions and those pile up (I just observed the issue, I haven't computed the O() in space/time complexity)
I tried doing this calculation (of 60!) with the arithmetic prover I wrote in lean and it takes about 1.2 s to generate
Thanks a lot, it really helps to have this.
It gives me hope that something better can be done and it also
gives me a realistic time to aim for
(within the bounds of what to expect for C++ vs the jvm).
and 0.4 s to verify. Lean is doing a heck of a lot more bookkeeping, so that validates my claim that this should be doable in a fraction of a second in metamath.
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To post to this group, send email to meta...@googlegroups.com.
Visit this group at https://groups.google.com/group/metamath.
For more options, visit https://groups.google.com/d/optout.
This looks impressive.
Not that much. I have only started working with trees and parser
for like 2 weeks. There is not much worth in what I have been
doing yet.
I have not yet solved one non-trivial problem. Also, I am standing
on the shoulders of Giants and they have been doing all the heavy
lifting.
Not having studied the code, I'm somewhat uncertain about what is going on "in between". Are you generating native metamath proofs for these statements? If so, how big are the proofs? Or how big would you estimate them to be?
At this time, I am only exploring what can be done with metamath
and my code (I am researching apis and methods to prove things in
a simple/human way).
The code I use only generates the steps necessary to obtain a
proof.
A month ago, I was using that to translate those steps into an
uncompressed metamath proof that I would then check.
But I haven't rewritten that part yet (I am refactoring, to use trees instead of strings). I'll do it soon though.
so, I only generate pre-metamath proofs, I don't check them yet.
From the point of view of archiving the results (in say set.mm), what matters is the size of the proofs and possibly (if they are extremely large) the run time of verifying them with a metamath proof verifier. The time needed to generate the proofs is somewhat irrelevant, since it's a one-time activity that is done once and never needed again. Of course, tools to help generate proofs more efficiently are welcome.
I understand what you say there.
As I would like also normal people to use the code that I wrote
(and not just math researchers) and
as they would expect computations to take less than 3s, I need the
time to generate the proofs to be as short as possible.
At this point, there is NO optimisations (neither mathematically,
nor in software) for speed, or small size of proofs.
Obviously metamath is not well-suited to do routine arithmetic on large numbers efficiently, which has more to do with the nature of complete formal proofs from axioms than with metamath per se.
In principle we can call an external "oracle" (calculator) that returns the result of a large calculation, then insert it into the metamath database as a new axiom, but philosophically that is a little ugly and would be a last resort.
It might be possible to quickly produce a special kind of proof, one that explicitly relies on some computations, which one could check once to promote the proof from "true if the computations hold" status to "metamath-true" status.
Also, I am really interested in what Mario Carneiro says there :
this, by the way, is one of the planned applications of Metamath Zero (and surrounding projects). The idea is that you can verify a verifier and use it to extend the system you are running, so that you get ad hoc proof procedures with bare metal speed. I've claimed on several occasions that metamath should be able to "execute programs" by means of a proof trace, but the big overhead there is the amount of equality testing, since we are taking the verifier through the steps and also checking at each stage that the verifier is still on board. This checking is not required if we already trust the verifier, so the process can theoretically be sped up by just running the program and asserting the end result. My goal is to find ways to incorporate this into the proof process in a formally sound way.
But, first I have to understand how it can be done and what would
be needed.
An interesting challenge is to figure out how to prove that the results of certain long calculations hold without doing the calculations in the proof itself. A trivial example would be proving that 300! is an even number without expanding it. A more sophisticated example is using a certificate to show a large number is prime. Calculating the certificate can done once outside of metamath (and in principle we don't care how long the calculation takes), but proving the validity of the certificate is a relatively short proof. I think Mario was on the verge of doing this for some primes needed to prove Bertrand's postulate, but in the end direct proofs won out because the size of the numbers wasn't quite large enough to justify the overhead of using a certificate (Mario may wish to comment or correct me).
What you say is true. But this is math research (do things the smarter way). At this point, I'm aiming to allow everyone to do basic maths, but with metamath proof-check truth.
If I manage to build a decent tool achieving this purpose, what
you say would be a good next step though :)
A paper written by Mario is "Arithmetic in Metamath Case Study: Bertrand's Postulate" (in case it wasn't mentioned before; I may have missed it):
http://arxiv.org/abs/1503.02349
http://us.metamath.org/downloads/bpos.pptx.pdf (slides)
http://youtu.be/PF9cL3RABIw
Norm
On Wednesday, March 13, 2019 at 6:28:29 AM UTC-4, Olivier Binda wrote:
I have implemented subtraction (I chose option 1 : using help from binary computations) for positive integersassertEquals("; ; ; ; ; ; 1 0 5 1 2 9 7".toStatement(), engine.reduce("( ; 1 3 x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + 7 ) ) - ; ; 1 2 1 ) )".toStatement()))
and factorials yesterdayBut I hit a big hurdle (performance)
computing 30 ! takes 1s but computing 60 ! takes 18s :/
assertEquals("; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; 8 3 2 0 9 8 7 1 1 2 7 4 1 3 9 0 1 4 4 2 7 6 3 4 1 1 8 3 2 2 3 3 6 4 3 8 0 7 5 4 1 7 2 6 0 6 3 6 1 2 4 5 9 5 2 4 4 9 2 7 7 6 9 6 4 0 9 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0".toStatement(), engine.reduceFactorial("( ! ` ; 6 0 )".toStatement())) t=t.logTime()//18096 ms
I had previously foolishly tried to compute 300 ! and I had to reboot my computer that had frozen.
Though I am using some tail recursivity optimizations, and doing it this way
n! = (n-1)! * n = ((n-2)! * (n-1)) * n = (n-2)! * reduce((n-1)*n) = ...My guess is that there are still a lot of single digits multiplications and slow additions and those pile up (I just observed the issue, I haven't computed the O() in space/time complexity)
say a multiplication between 2 numbers with digits in the hundreds should require quite some amount of work (including associativity, commutativity, additions, single digit multiplications, ...)
Proving operations at the bare metal (metamath) level is really resource intensive
--
Le 13/03/2019 à 18:19, Mario Carneiro a écrit :
> his, by the way, is one of the planned applications of Metamath Zero
> (and surrounding projects). The idea is that you can verify a verifier
> and use it to extend the system you are running, so that you get ad
> hoc proof procedures with bare metal speed. I've claimed on several
> occasions that metamath should be able to "execute programs" by means
> of a proof trace,
I saw that in a previous mail but I only had 5 minutes to ponder it and
failed to truly grasp what you said (mostly what/how it should be done).
While you do a computation in software, you produce a log of the
operations (as trees) you perform, and you verify it later with a metamath
verifier — is that it?
> but the big overhead there is the amount of equality testing, since we
> are taking the verifier through the steps and also checking at each
> stage that the verifier is still on board. This checking is not
> required if we already trust the verifier, so the process can
> theoretically be sped up by just running the program and asserting the
> end result. My goal is to find ways to incorporate this into the proof
> process in a formally sound way.
I am really really interested in what you say here.
But, as I have very little time each day to do metamath work, I don't
see/understand yet how this could be done.
I have been following from very far the discussions about Metamath zero
but I don't understand the deep meanings/ideas behind what is said (no
time to ponder :/).
I would really really love some theory/explanation/sketch about what
would be needed for a metamath (or metamath zero) system to be extendable
and to be extended and some hint about how to do it
if I understood it and if I was convinced that it should be doable, I
would try to implement it.
assertEquals("; ; ; ; ; ; 1 1 1 3 7 4 9".toStatement(), engine.reduce("( ( ; 2 6 x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + ( 7 x. ( ! ` 5 ) ) ) ) - ; ; 2 3 8 ) ) / ; 1 4 )".toStatement()))
t = t.logTime()
assertEquals("; ; ; ; ; ; 1 4 5 6 4 4 1".toStatement(), engine.reduce("( ( ( ; 2 6 + ( 2 ^ 3 ) ) x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + ( 7 x. ( ! ` 5 ) ) ) ) - ; ; 2 3 8 ) ) / ; 1 4 )".toStatement()))
Thanks for your answers. I understand a lot more now and you gave me a solution to a problem I had.
I was wondering how I could extend my system to allow user to write "tactics" and I realize now that I could :A) Generate code, compile it and dynamically integrate the binary result to my running app.
Once source code is generated, on a Linux/Mac/Windows box (but not on Android/iOS/a browser) it is possible to call an external compiler if it is set up. All the heavy lifting would (again) be done by world experts (the people writing the Kotlin compiler), and it would make it possible to use a battle-tested and well-known heavy-usage language for tactics programming. B) Write an interpreter. C) Another possibility would be to integrate a scripting language, like Lua, but it would be cumbersome to have a bridge to communicate and translate objects to Lua. D) Then there is the possibility that you are researching (Metamath Zero and a minimal-footprint language, hopefully with a simple compiler).
I think that I'll first experiment with possibility A. ^^Also, I am wondering
1) how it would be possible to speed up multiplications with a Verifier B, whose code would be checked by a veryfier A.Basically, it would solve a lot of things to have multiplications as fast as a CPU can do them, but how can we trust those (with a metamath-like level) ?cpu do things and have bugs, how can we be sure that they behave like they are modelled to be ?
2) how would using a small footprint language improve the trust in the verifier in a meaningful way ?
(there is still the os underneath except if you build a very simple one that works on some generic motherboard, but this brings it's own can of worms...isn't it overkill ?)
Also, I implemented power and division for positive integers. Again in a dumb (but simple) manner.
I took note of the possibilities of speed-up that you mentioned: assertEquals("; ; ; ; ; ; 1 1 1 3 7 4 9".toStatement(), engine.reduce("( ( ; 2 6 x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + ( 7 x. ( ! ` 5 ) ) ) ) - ; ; 2 3 8 ) ) / ; 1 4 )".toStatement()))
t = t.logTime()
assertEquals("; ; ; ; ; ; 1 4 5 6 4 4 1".toStatement(), engine.reduce("( ( ( ; 2 6 + ( 2 ^ 3 ) ) x. ( ( ( 7 x. ; 8 9 ) x. ( ; ; 1 2 3 + ( 7 x. ( ! ` 5 ) ) ) ) - ; ; 2 3 8 ) ) / ; 1 4 )".toStatement()))Sadly, I haven't understood the trace thing yet but I remember that you were talking about it in your article about arithmetics, so I'll go read it once more.
Wow. Thanks Mario. I really love your answers.
Thanks for the insight about trust and security. I had guessed
why you wanted maximal security and why you wanted to go all the
way to get it,
but hadn't got the reasoning behind. It makes a lot more sense now
for me, especially the VerifierA proves VerifierB thing and the
trust contract with the user.
Also, I have also understood the trace thing.
My code is actually implemented the way you describe it (with
recursion and simple steps that have proofs).
I was anticipating that it would be possible to plug in
arbitrarily smart/fast procedures this way and I'm happy to see
some confirmation of that fact in your answer (that is the way I
wanted it to be, there should be better things down the road then
performance and mathematically wise :) )
I also got a bonus : I hadn't understood the certificate thing either but now I do (you are really great at teaching things actually).
with further thought I realized also that I could also do a
better A solution for my problem :
I generate code, compile it on my computer with the kotlin compiler and statically generate a working solution for all platforms (instead of letting users doing it, if their setup allows it).
So, I am really happy. I am still concerned about a few technical
issues though :
to create a higher level verifier, I can generate code but I have
to model it (it is my trust contract with users).
So I need a metamath(like) language to model maths and a
computational language to model a higher level verifier/
computations
a) Does it require modeling metamathematics ? arithmetic ? a new
language ? some new axioms in metamath ?
b) metamath was avoiding a number model to stay simple, it
violates this principle, right ? or is there a way to keep simple
but do higher level stuff ?
I would really love to try building such a higher level verifier
(say, with a simple aim first like) that does fast additions
and/or fast multiplications,
also I would open source it's code in the public domain so it
could be used by everyone.
Concerning your metamath zero project.
If I understand correctly, you just need to implement it for one
platform, preferably a simple (not bloated with useless hardware),
widely available and cheap platform.
Something like the raspberry pi 0 (for 5€) an intel compute stick (70+€) or some ARM hardware
You don't need a whole os as you just need to input strings and
do simple output (strings or booleans ?)
But you still need to handle parts of the stuff associated with
usb, memory, ...
A single purpose minimalist bare metal "os" (more like malloc and
friends) sounds doable, the meat of the thing
would be indeed the metamath zero language design and modeling
verifiers and bootstrapping things
I guess that the professional mathematical community could welcome a 5€ device that plugs into a USB port and allows everyone to proof-check maths...
It sounds to me like a worthy goal that could bring a lot of
value.
Wow. Thanks Mario. I really love your answers.
Thanks for the insight about trust and security. I had guessed why you wanted maximal security and why you wanted to go all the way to get it,
but hadn't got the reasoning behind. It makes a lot more sense now for me, especially the VerifierA proves VerifierB thing and the trust contract with the user.Also, I have also understood the trace thing.
My code is actually implemented the way you describe it (with recursion and simple steps that have proofs).
I was anticipating that it would be possible to plug in arbitrarily smart/fast procedures this way and I'm happy to see some confirmation of that fact in your answer (that is the way I wanted it to be, there should be better things down the road then performance and mathematically wise :) )I also got a bonus : I hadn't understood the certificate thing either but now I do (you are really great at teaching things actually).
with further thought I realized also that I could also do a better A solution for my problem :
I generate code, compile it on my computer with the kotlin compiler and statically generate a working solution for all platforms (instead of letting users doing it, if their setup allows it).
So, I am really happy. I am still concerned about a few technical issues though :
to create a higher level verifier, I can generate code but I have to model it (it is my trust contract with users).
So I need a metamath(like) language to model maths and a computational language to model a higher level verifier/ computationsa) Does it require modeling metamathematics ? arithmetic ? a new language ? some new axioms in metamath ?
b) metamath was avoiding a number model to stay simple, it violates this principle, right ? or is there a way to keep simple but do higher level stuff ?
I would really love to try building such a higher level verifier (say, with a simple aim first like) that does fast additions and/or fast multiplications,
also I would open source it's code in the public domain so it could be used by everyone.
Concerning your metamath zero project.
If I understand correctly, you just need to implement it for one platform, preferably a simple (not bloated with useless hardware), widely available and cheap platform.
Something like the raspberry pi 0 (for 5€) an intel compute stick (70+€) or some ARM hardware
You don't need a whole os as you just need to input strings and do simple output (strings or booleans ?)
But you still need to handle parts of the stuff associated with usb, memory, ...
A single purpose minimalist bare metal "os" (more like malloc and friends) sounds doable, the meat of the thing
would be indeed the metamath zero language design and modeling verifiers and bootstrapping things
I guess that the professionnal mathematical community could welcome a 5€ device that plugs in a usb port and allow everyone to proof-check maths...
It sounds to me like a worthy goal that could bring a lot of value.
> You don't need a whole os as you just need to input strings and do
> simple output (strings or booleans ?)
> But you still need to handle parts of the stuff associated with usb,
> memory, ...
>
> A single purpose minimalist bare metal "os" (more like malloc and
> friends) sounds doable, the meat of the thing
> would be indeed the metamath zero language design and modeling verifiers
> and bootstrapping things
There are quite a lot of initiatives in the open hardware community that
might be useful here. For example, I own a little fomu hacker
edition[2], which is basically a little FPGA that fits inside a USB
port. The FPGA chip has a nice open source toolchain thanks to a bunch
of people who managed to reverse engineer it (most FPGA toolchains are
proprietary), and a simple RISC-V soft CPU can be synthesized on it. In
line of principle, if asmc was ported to RISC-V (which is on my to do
list, although not really on the top of it) we would have something like
what you say, built with a fully open-source toolchain.
[2] https://github.com/im-tomu/fomu-hardware/tree/master/hacker
The problem is that the FPGA does not have a lot of RAM and storage, so
this is still a little bit theoretical. Maybe a streaming MM or MM0
might be implemented, so that the FPGA does not need to store the whole
database in RAM, but can verify it while the host computer is sending it.
package org.lakedaemon.math.engine.trees.operations
import org.lakedaemon.kotlin.asInt
import org.lakedaemon.kotlin.debug
import org.lakedaemon.math.BigInteger
import org.lakedaemon.math.engine.*
/** Computes ( a x. b ) AND proves that
 * |- statement = reduceMultiplication(statement)
 *
 * It must only be called on a co node and it ensures that it is only called on a co(x.) node.
 * NOTE(review): the previous KDoc said "( a + b )" / reduceAddition / co(+) — a copy-paste
 * from the addition reducer; corrected here to match what the code actually does. */
fun Engine.reduceMultiplication(statement: Statement): Statement {
    // val statement = from(originalStatement)
    val (a, op, b) = statement.children
    // if ((!a.matchDigit() && !a.matchDecimalConstructor()) || (!b.matchDigit() && !b.matchDecimalConstructor())) return statement
    // Kotlin idiom: `op != x` compiles to `!(op == x)`, i.e. a negated equals() call — behavior unchanged.
    if (op != MM_MULTIPLICATION) throw Exception("wrong operation : $op")
    return when {
        // Absorbing / identity elements: 0 x. b, a x. 0, 1 x. b, a x. 1.
        a.matchZero() -> statement.s(laMul02i)
        b.matchZero() -> statement.s(laMul01i)
        a.matchOne() -> statement.s(laMulid2i)
        b.matchOne() -> statement.s(laMulid1i)
        // Nested multiplication in a factor: reduce it, then re-reduce the product if it changed.
        a.matchMultiplication() -> statement.a(Engine::reduceMultiplication).eqIfDifferent(statement, Engine::reduceMultiplication)
        b.matchMultiplication() -> statement.c(Engine::reduceMultiplication).eqIfDifferent(statement, Engine::reduceMultiplication)
        // Single-digit times single-digit: delegate to the digit multiplication table.
        a.matchDigit() && b.matchDigit() -> statement.s(Engine::multiplies2digits)
        // digit x. decimal: commute so the decimal number is on the left, then retry.
        a.matchDigit() && b.matchDecimalConstructor() -> statement.s(laMulcomi).s(Engine::reduceMultiplication)
        // decimal x. decimal: split b into (b1 ; b2) and distribute a over both parts.
        a.matchDecimalConstructor() && b.matchDecimalConstructor() -> {
            val (b1, b2) = b.children
            /** b2 is supposed to be a digit */
            if (!b2.matchDigit()) return statement
            val sA = "( ∨1c x. ∨2c )".with(a, b1)
            val sB = "( ∨1c x. ∨2c )".with(a, b2)
            statement.s { result(neoremDec2Mul, a, b, "; ∨1c ∨2c".with(sA, statement0), sB, b1, b2) }
                .map(P.a.a to Engine::reduceMultiplication, P.c to Engine::reduceMultiplication)
                .s(Engine::reducePositiveIntegerAddition)
        }
        // decimal x. digit: schoolbook step — multiply the low digit, propagate any carry upward.
        a.matchDecimalConstructor() && b.matchDigit() -> {
            val (a1, a2) = a.children
            /** a2 is supposed to be a digit */
            if (!a2.matchDigit()) throw Exception("$a2 is not a digit")
            val a2TimesB = multiplies2digits("( ∨1c x. ∨2c )".with(a2, b))
            if (!a2TimesB.matchDecimalConstructor()) {
                // No carry: only the upper partial product still needs reduction.
                val upperResult = "( ∨1c x. ∨2c )".with(a1, b)
                statement.s { result(neoremDecMul, a, b, upperResult, a2TimesB, a1, a2) }.a(Engine::reduceMultiplication)
            } else {
                // Carry case: add the carry digit sA to the upper partial product before recombining.
                val (sA, sB) = a2TimesB.children
                val s1 = "( ∨1c x. ∨2c )".with(a1, b)
                val upperResult = "( ∨1c + ∨2c )".with(s1, sA)
                statement.s { result(neoremDecMulCarry, a, b, upperResult, sB, a1, a2, sA) }
                    .map(P.a.a to Engine::reduceMultiplication)
                    .a(Engine::reducePositiveIntegerAddition)
            }
        }
        // Signs: (-a) x. (-b) drops both minuses; a single minus is pushed out, then reduced.
        a.matchMinus() && b.matchMinus() -> statement.s(laMul2negi).s(Engine::reduceMultiplication)
        a.matchMinus() -> statement.s(laMulneg1i).a(Engine::reduceMultiplication)
        b.matchMinus() -> statement.s(laMulneg2i).a(Engine::reduceMultiplication)
        // Rational (or integer-as-rational) factors: normalize, multiply, cancel common factors.
        (a.matchPositiveRational() || a.matchPositiveInteger()) && (b.matchPositiveRational() || b.matchPositiveInteger()) -> {
            // first simplify each fraction with normalized fractions
            val aNormalized = a.normalized
            val bNormalized = b.normalized
            // use (n/1) for integers
            val changedA = if (aNormalized.matchInteger()) aNormalized.s2(laDiv1i) else aNormalized
            val changedB = if (bNormalized.matchInteger()) bNormalized.s2(laDiv1i) else bNormalized
            val s1 = statement.map(P.a to { changedA }, P.c to { changedB })
            // then try to cancel factors
            // a/c x. b/d = ac / bd
            val s2 = s1.s(laDivmuldivi)
            val (numA, denA) = aNormalized.rationalValue!!
            val (numB, denB) = bNormalized.rationalValue!!
            val g1 = numA.gcd(denB)
            val g2 = numB.gcd(denA)
            // eventually cancel the common factor in numA and denB
            val s3 = if (g1 != BigInteger.ONE) {
                val numA1 = numA.divide(g1)
                val denB1 = denB.divide(g1)
                val sG1 = g1.toStatement()
                s2.map(P.a to { it.a { reduceMultiplication("( ∨1c x. ∨2c )".with(numA1.toStatement(), sG1)).inv() }.s(laMul32i)},
                    P.c to { it.c { reduceMultiplication("( ∨1c x. ∨2c )".with(denB1.toStatement(), sG1)).inv() }.s2(laMulassi) })
                    .s(neoremDivcan5rd)
            } else s2
            // eventually cancel the common factor in numB and denA
            val s4 = if (g2 != BigInteger.ONE) {
                val denA2 = denA.divide(g2)
                val numB2 = numB.divide(g2)
                val sG2 = g2.toStatement()
                s3.map(P.a to { it.c { reduceMultiplication("( ∨1c x. ∨2c )".with(numB2.toStatement(), sG2)).inv() }.s2(laMulassi) },
                    P.c to { it.a { reduceMultiplication("( ∨1c x. ∨2c )".with(denA2.toStatement(), sG2)).inv() }.s(laMul32i) })
                    .s(neoremDivcan5rd)
            } else s3
            // Reduce numerator and denominator products, then renormalize the fraction.
            s4.map(P.a to Engine::reduceMultiplication, P.c to Engine::reduceMultiplication).s{it.normalized}
        }
        else -> statement
    }.reductionContract(statement)
}
/**
 * Multiplies the two single-digit children of a `( a x. b )` statement,
 * recording the proof steps via `result(...)` and returning the reduced statement.
 */
private fun Engine.multiplies2digits(statement: Statement): Statement {
    val (left, _, right) = statement.children
    // Trivial cases: multiplication by zero or one uses a dedicated theorem.
    if (left.matchZero()) return result(laMul02i, right)
    if (right.matchZero()) return result(laMul01i, left)
    if (left.matchOne()) return result(laMulid2i, right)
    if (right.matchOne()) return result(laMulid1i, left)
    // General digit-by-digit case: look the product up in the laMul table.
    val dLeft = left.asInt()
    val dRight = right.asInt()
    var proof = result(laMul(dLeft, dRight))
    if (dLeft < dRight) {
        // Presumably the laMul table only covers one argument order — flip via commutativity. TODO confirm
        proof = result(laEqtri, statement, proof, result(laMulcomi, left, right))
    }
    if (dLeft * dRight == 10) {
        // 10 needs its decimal representation bridged through laDec10.
        proof = result(laEqtri, statement, result(laDec10), proof)
    }
    return proof
}
Hello Filip :)
Hello Olivier!
Good luck with your CAS implementation! It is a wonderful work!
But I have 2 questions for you:
1. The proofs of your calculations would be in formal math, not in classic math; do you think there would be a segment of the population that would need your proofs?
The segments of the population that I am targeting are :
-the average math student
-the professional math researcher
Basically, humans would enter the proof either
- through a well thought UI, that looks and behaves as much as it
can as traditional/classic math
(which makes it usable to the average math student).
- optionally/additionally, through text input (for power users
like professional mathematicians, they know what they do and it is
faster this way)
Basically, at the moment, it works this way :
You may have hypothesis/antecedent like
x = -2
y e. ZZ
You start with a (valid) math statement like
( x x. ; 1 2 ) + y
and then you may point to (select) parts of the statement and apply operations
like reduction, commutativity, associativity to transform the
Statement (with theorems the way you want/can) and the api makes
sure that, at the end, you get
- a formal proof of the resulting statement
- a formal proof of the transition
like |- ( x = 1 -> ( y e. ZZ -> ( x x. ; 1 2 ) + y = ; 1 2
+ y ) )
It is still a work in progress though. At the moment, I'm
refactoring the api to make it work nicely with antecedents. I
have made some progress but I have still work to do to make
the usual operations (+ - * /) work with rationals AND
antecedents, after which I should get sum, product and function
support quite effortlessly
Once I get that to work, I'll :
1) work on the export step from my theorem format to mm (or mm0
?).
Because I'm no longer using regular set.mm theorems (got to call
too many of them to handle 1 operation with antecedents)
2) start building a prototype app and a UI :
to allow the first beta users to build formal proofs
interactively and export them to mm (or mm0 ?)
2. Lots of math formulas are non-existent in set.mm, how do you deal with that?
At the moment, to develop my app,
I'm using my own theorems like this one
val A_plus_0 = tf("( ∨1c + 0 ) = ∨1c", "∨1c e. CC")
tf stands for "theorem family" because it also allows one to use it with antecedents like
|- ( v2w -> v1c e. CC )
gives
|- ( v2w -> (v1c + 0 ) = v1c )
v1c is for class
v1s for set
v1w for wff
Also, it removes the need for variants for
|- v1c = v2c
|- v3c = v4c
gives
|- ; v1c v3c = v2c v4c
like
|- v1c = v2c
gives
|- ; v1c v3c = v2c v3c
(you have to use such variants to lessen the huge amount of hypotheses that the system has to prove automatically to use the theorems)
such a theorem family could be easily proven (I believe) with the theorems that are in set.mm
But mm lacks variadic arguments and it makes it painful to work
with antecedents, so this is the way I decided to use to work
around that (my app would first create a proof relying on some
theorem family and then it would prove all the specialized
theorems it uses)
As soon as I get arithmetic operations to work nicely, I'll post
a progress update
Thank you very much!
Filip
--
You received this message because you are subscribed to the Google Groups "Metamath" group.
To unsubscribe from this group and stop receiving emails from it, send an email to metamath+u...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/metamath/11dc6086-2cf5-4c81-bcef-e6a58978b234%40googlegroups.com.
"sum_ ∨1s e. ( 1 ... 5 ) ∨1s".test("; 1 5")
where test just applies a "reduce" method on the left term and checks that it leads to the second term (and proves it)
"sum_ ∨1s e. ( 1 ... 5 ) ( ( 2 x. ∨1s ) + 1 )".test("; 3 5")
"sum_ ∨1s e. ( 1 ... 9 ) ( ∨1s ^ 2 )".test("; ; 2 8 5")
"sum_ ∨1s e. ( 1 ... 4 ) ( ( 1 / 2 ) ^ ∨1s )".test("( ; 1 5 / ; 1 6 )")
"A. ∨1s e. ZZ 0 <_ ( ∨1s ^ 2 )".impTest("A. ∨1s e. NN 0 <_ ( ∨1s ^ 2 )") { it.imp(P.b + "|- NN C_ ZZ") }
"A. ∨1s e. ZZ 0 <_ ( ∨1s ^ 2 )".toAssertion().imp(P.b + "|- NN C_ ZZ")
P.b tells the node of the statement where the change should happen
"( ( 1 + 2 ) = 3 /\\ ( 2 + 3 ) = 5 )".eqTest("( 3 = 3 /\\ 5 = 5 )") { it.eq4(P.a.a * add(1, 2), P.b.a * add(2, 3)) }
assume("( ∨3w -> ( ∨6w -> ∨4w ) )")
assume("( ( ∨3w -> ∨6w ) -> ( ∨5w <-> ∨5w ) )")
"( ( ∨3w -> ∨4w ) -> ∨5w )".impTest("( ( ∨3w -> ∨6w ) -> ∨5w )") { it.imp(P.a.b + "( ∨6w -> ∨4w )") }
"-. 3 < 1".limpTest("-. 3 < 2") { it.limp(P.a.c + "|- 1 < 2") }
/** proving that ( A e. CC -> ( C e. NN0 -> ( ( 1 - A ) x. sum_ k e. ( 0 .. ∨C ) ( ∨A ^ k ) ) = ( 1 - ( A ^ ( C + 1 ) ) ) ) )
* or with A=v1c, C=v3c, k=v2s
* ( ∨1c e. CC -> ( ∨3c e. NN0 -> ( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) ) = ( 1 - ( ∨1c ^ ( ∨3c + 1 ) ) ) ) ) */
val assertion = "( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) )".toAssertion("∨1c e. CC", "∨3c e. NN0")
val result = "( 1 - ( ∨1c ^ ( ∨3c + 1 ) ) )".toStatement()
val step = "( ∨1c e. CC -> ( ∨3c e. NN0 -> ( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) ) = ( 1 - ( ∨1c ^ ( ∨3c + 1 ) ) ) ) )".toStatement()
val proof = assertion
.s(A_times_Sum)
// sum_ ∨2s e. ( 0 .. ∨3c ) ( ( 1 - ∨1c ) x. ( ∨1c ^ ∨2s ) )
.c(A_times_B_com)
// sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ∨1c ^ ∨2s ) x. ( 1 - ∨1c ) )
.c(A_times_BsubC)
// sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ( ∨1c ^ ∨2s ) x. 1 ) - ( ( ∨1c ^ ∨2s ) x. ∨1c ) )
.eq(P.c.a.th(A_times_1), P.c.c.th(A_exp_Bplus1)).s(Sum_AsubB)
// sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ∨1c ^ ∨2s ) - ( ∨1c ^ ( ∨2s + 1 ) ) )
.s(Sum_AsubB)
// ( sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ( ∨2s + 1 ) ) )
.c(Sum_AplusB_in) { arrayOf("∨5s".toStatement(), statement1) }
// ( sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( ( 0 + 1 ) .. ( ∨3c + 1 ) ) ( ∨1c ^ ∨5s ) )
.eq(P.c.b.a.op(Engine::reduceAddition))
// ( sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( ( 1 .. ( ∨3c + 1 ) ) ( ∨1c ^ ∨5s ) )
.eq(P.a.th(firstA_plus_Sum_ofA), P.c.th(Sum_ofA_plus_lastA))
// ( ( ( ∨1c ^ 0 ) + sum_ ∨2s e. ( ( 0 + 1 ) .. ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( ( 1 .. ( ( ∨3c + 1 ) - 1 ) ) ) ( ∨1c ^ ∨5s ) ) + ( ∨1c ^ ( ∨3c + 1 ) ))
.eq(
P.a.a.op(Engine::reduce),
P.a.c.b.op(Engine::reduce),
P.c.b.c.th(A_plus_B_sub_C_ass))
// ( ( 1 + sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( ( 1 .. ( ∨3c + ( 1 - 1 ) ) ) ( ∨1c ^ ∨5s ) ) + ( ∨1c ^ ( ∨3c + 1 ) ))
.eq(P.c.b.c.op(Engine::reduce))
// ( ( 1 + sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨5s ) ) + ( ∨1c ^ ( ∨3c + 1 ) ) )
.s(A_plus_B_sub_C_ass)
// ( 1 + ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - ( sum_ ∨5s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨5s ) ) + ( ∨1c ^ ( ∨3c + 1 ) ) )
.c(A_sub_BplusC_ass)
// ( 1 + ( ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨5s ) ) - ( ∨1c ^ ( ∨3c + 1 ) ) )
.eq(P.c.a.c.th(Sum_AswitchB_in) { arrayOf("∨2s".toStatement(), "( ∨1c ^ ∨2s )".toStatement()) })
// ( 1 + ( ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) ) - ( ∨1c ^ ( ∨3c + 1 ) ) )
.eq(P.c.op(Engine::reduce))
// ( 1 - ( ∨1c ^ ( ∨3c + 1 ) ) )
/** done */
assertEquals(assertion.origin, proof.origin)
assertEquals(result, proof.result)
assertEquals(step, proof.step)
assertTrue(isProven(proof.step))
/** proving that ( ∨1c e. CC -> ( ∨3c e. NN0 -> ( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) ) = ( 1 + -u ( ∨1c ^ ( ∨3c + 1 ) ) ) ) ) */
val assertion = "( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) )".toAssertion("∨1c e. CC", "∨3c e. NN0")
val result = "( 1 + -u ( ∨1c ^ ( ∨3c + 1 ) ) )".toStatement()
val step = "|- ( ∨1c e. CC -> ( ∨3c e. NN0 -> ( ( 1 - ∨1c ) x. sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) ) = ( 1 + -u ( ∨1c ^ ( ∨3c + 1 ) ) ) ) )".toStatement()
val proof = assertion
.s(A_times_Sum) // sum_ ∨2s e. ( 0 .. ∨3c ) ( ( 1 - ∨1c ) x. ( ∨1c ^ ∨2s ) )
.c(A_times_B_com) // sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ∨1c ^ ∨2s ) x. ( 1 - ∨1c ) )
.c(A_times_BsubC) // sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ( ∨1c ^ ∨2s ) x. 1 ) - ( ( ∨1c ^ ∨2s ) x. ∨1c ) )
.eq(P.c.a.th(A_times_1), P.c.c.th(A_exp_Bplus1)) // sum_ ∨2s e. ( 0 .. ∨3c ) ( ( ∨1c ^ ∨2s ) - ( ∨1c ^ ( ∨2s + 1 ) ) )
.s(Sum_AsubB) // ( sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ( ∨2s + 1 ) ) )
.c(Sum_AplusB_in) { arrayOf("∨5s".toStatement(), statement1, "( ∨1c ^ ( ( ∨5s - 1 ) + 1 ) )".toStatement()) } // ( sum_ ∨2s e. ( 0 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( ( 0 + 1 ) .. ( ∨3c + 1 ) ) ( ∨1c ^ ( ( ∨5s - 1 ) + 1 ) ) )
.eq(P.c.b.a.op(Engine::reduceAddition)) // ( sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( 1 ... ( ∨3c + 1 ) ) ( ∨1c ^ ( ( ∨5s - 1 ) + 1 )
.eq(P.c.c.c.th(AsubB_plus_C_rot)) // ( sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( 1 ... ( ∨3c + 1 ) ) ( ∨1c ^ ( ∨5s - ( 1 - 1 ) ) )
.eq(P.c.c.op(Engine::reduce)) // ( sum_ ∨2s e. ( 0 ... ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( 1 ... ( ∨3c + 1 ) ) ( ∨1c ^ ∨5s ) )
.eq(P.a.th(firstA_plus_Sum_of) { arrayOf("( ∨1c ^ 0 )".toStatement()) }, P.c.th(Sum_ofA_plus_lastA) { arrayOf("( ∨1c ^ ( ∨3c + 1 ) )".toStatement()) }) // ( ( ( ∨1c ^ 0 ) + sum_ ∨2s e. ( ( 0 + 1 ) ... ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( 1 ... ( ( ∨3c + 1 ) - 1 ) ) ( ∨1c ^ ∨5s ) + ( ∨1c ^ ( ∨3c + 1 ) ) ) )
.eq(P.a.a.op(Engine::reduce), P.a.c.b.op(Engine::reduce), P.c.a.b.c.th(AplusB_sub_C_ass)) // ( ( 1 + sum_ ∨2s e. ( 1 ... ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( 1 ... ( ∨3c + ( 1 - 1 ) ) ) ( ∨1c ^ ∨5s ) + ( ∨1c ^ ( ∨3c + 1 ) ) ) )
.eq(P.c.a.b.c.op(Engine::reduce))// ( 1 + sum_ ∨2s e. ( 1 ... ∨3c ) ( ∨1c ^ ∨2s ) ) - ( sum_ ∨5s e. ( 1 ... ∨3c ) ( ∨1c ^ ∨5s ) + ( ∨1c ^ ( ∨3c + 1 ) ) ) )
.s(AplusB_sub_C_ass) // ( 1 + ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - ( sum_ ∨5s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨5s ) ) + ( ∨1c ^ ( ∨3c + 1 ) ) )
.c(A_sub_BplusC_ass) // ( 1 + ( ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨5s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨5s ) ) - ( ∨1c ^ ( ∨3c + 1 ) ) )
.eq(P.c.a.c.th(Sum_AswitchB_i) { arrayOf("∨2s".toStatement(), "( ∨1c ^ ∨2s )".toStatement()) }) // ( 1 + ( ( sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) - sum_ ∨2s e. ( 1 .. ∨3c ) ( ∨1c ^ ∨2s ) ) - ( ∨1c ^ ( ∨3c + 1 ) ) )
.eq(P.c.op(Engine::reduce)) // ( 1 + -u ( ∨1c ^ ( ∨3c + 1 ) ) )
/** done */
assertEquals(assertion.origin, proof.origin)
assertEquals(result, proof.result)
assertEquals(step, proof.step)
val step = "( ( 1 + 1 ) + 1 )".toAssertion().s(Engine::reduce).step
val proof = Mephistolus2Metamath.mmProofFor(step, this@with)
MetamathProofChecker.process("test", NormalizedTheorem(step, listOf(), 0, setOf()), proof)