diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 4ea8561..487746e 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -345,6 +345,47 @@
v.Op, v.Args[1].Type.String())
}
}
+ // Check size of args.
+			// This list isn't exhaustive; it covers just the common ops.
+			// It also can't handle ops whose args have different sizes, like shifts.
+ var argSize int64
+ switch v.Op {
+ case OpAdd8, OpSub8, OpMul8, OpDiv8, OpDiv8U, OpMod8, OpMod8U,
+ OpAnd8, OpOr8, OpXor8,
+ OpEq8, OpNeq8, OpLess8, OpLeq8,
+ OpNeg8, OpCom8,
+ OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
+ OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64:
+ argSize = 1
+ case OpAdd16, OpSub16, OpMul16, OpDiv16, OpDiv16U, OpMod16, OpMod16U,
+ OpAnd16, OpOr16, OpXor16,
+ OpEq16, OpNeq16, OpLess16, OpLeq16,
+ OpNeg16, OpCom16,
+ OpSignExt16to32, OpSignExt16to64,
+ OpZeroExt16to32, OpZeroExt16to64,
+ OpTrunc16to8:
+ argSize = 2
+ case OpAdd32, OpSub32, OpMul32, OpDiv32, OpDiv32U, OpMod32, OpMod32U,
+ OpAnd32, OpOr32, OpXor32,
+ OpEq32, OpNeq32, OpLess32, OpLeq32,
+ OpNeg32, OpCom32,
+ OpSignExt32to64, OpZeroExt32to64,
+ OpTrunc32to8, OpTrunc32to16:
+ argSize = 4
+ case OpAdd64, OpSub64, OpMul64, OpDiv64, OpDiv64U, OpMod64, OpMod64U,
+ OpAnd64, OpOr64, OpXor64,
+ OpEq64, OpNeq64, OpLess64, OpLeq64,
+ OpNeg64, OpCom64,
+ OpTrunc64to8, OpTrunc64to16, OpTrunc64to32:
+ argSize = 8
+ }
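+			// Ops not listed above leave argSize at zero; their args are not size-checked.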
+ if argSize != 0 {
+ for i, arg := range v.Args {
+ if arg.Type.Size() != argSize {
+						f.Fatalf("arg %d to %s should be %d bytes in size, but has type %s", i, v.Op, argSize, arg.Type.String())
+ }
+ }
+ }
// TODO: check for cycles in values
}
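
Not part of the patch, just an illustration: a minimal sketch of the kind of function the new check rejects, written against the ssa package's existing test helpers (testConfig, Bloc, Valu, Exit and the CheckFunc alias in export_test.go); the test name is invented and the quoted error text is approximate.

package ssa

import (
	"testing"

	"cmd/compile/internal/types"
)

func TestCom8ArgSizeMismatch(t *testing.T) {
	c := testConfig(t)
	fun := c.Fun("entry",
		Bloc("entry",
			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
			Valu("x", OpConst16, c.config.Types.Int16, 1, nil),
			// Com8 takes a 1-byte argument, but x is 2 bytes wide, so the
			// checker reports something like:
			//   arg 0 to Com8 should be 1 bytes in size, but has type int16
			Valu("bad", OpCom8, c.config.Types.Int8, 0, nil, "x"),
			Exit("mem")))
	CheckFunc(fun.f) // fails via the test frontend's Fatalf
}

With the patch applied this function is rejected; without it, the checker silently accepts the mismatched argument width.
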
diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go
index 4425c56..9805039 100644
--- a/src/cmd/compile/internal/ssagen/intrinsics.go
+++ b/src/cmd/compile/internal/ssagen/intrinsics.go
@@ -1442,7 +1442,7 @@
// byte N matched).
//
// NOTE: See comment above on bitsetFirst.
- out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], eq)
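+				// Only mask bits 0-7 describe g, which occupies the low 8 bytes
+				// of the 128-bit register.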
+ out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], eq)
// g is only 64-bits so the upper 64-bits of the
// 128-bit register will be zero. If h2 is also zero,
@@ -1502,7 +1502,7 @@
// means byte N matched).
//
// NOTE: See comment above on bitsetFirst.
- ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], sign)
+ ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT64], sign)
// g is only 64-bits so the upper 64-bits of
// the 128-bit register will be zero. PSIGNB
@@ -1532,7 +1532,7 @@
// byte N matched).
//
// NOTE: See comment above on bitsetFirst.
- out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], eq)
+ out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], eq)
// g is only 64-bits so the upper 64-bits of the
// 128-bit register will be zero. The upper 64-bits of
@@ -1566,7 +1566,7 @@
// byte N matched).
//
// NOTE: See comment above on bitsetFirst.
- ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], gfp)
+ ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT64], gfp)
// g is only 64-bits so the upper 64-bits of the
// 128-bit register will be zero. Zero will never match
@@ -1598,10 +1598,10 @@
// byte N matched).
//
// NOTE: See comment above on bitsetFirst.
- mask := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], gfp)
+ mask := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], gfp)
// Invert the mask to set the bits for the full slots.
- out := s.newValue1(ssa.OpCom16, types.Types[types.TUINT16], mask)
+ out := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], mask)
// g is only 64-bits so the upper 64-bits of the
// 128-bit register will be zero, with bit 7 unset.