diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 714b9a5..3f7579d 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -967,6 +967,11 @@
scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
+ // Conservatively scan the extended register state for architectures where it may contain pointers.
+ if GOARCH == "arm64" && gp.asyncSafePoint {
+ xRegScan(gp, gcw, &state)
+ }
+
// Scan the stack. Accumulate a list of stack objects.
var u unwinder
for u.init(gp, 0); u.valid(); u.next() {
diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go
index 977bf0b..ee6f879 100644
--- a/src/runtime/preempt_noxreg.go
+++ b/src/runtime/preempt_noxreg.go
@@ -25,3 +25,5 @@
func xRegRestore(gp *g) {}
func (*xRegPerP) free() {}
+
+func xRegScan(gp *g, gcw *gcWork, state *stackScanState) {}
diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go
index cc52c5f..c9eda53 100644
--- a/src/runtime/preempt_xreg.go
+++ b/src/runtime/preempt_xreg.go
@@ -10,8 +10,9 @@
// While asynchronous preemption stores general-purpose (GP) registers on the
// preempted goroutine's own stack, extended register state can be used to save
// non-GP state off the stack. In particular, this is meant for large vector
-// register files. Currently, we assume this contains only scalar data, though
-// we could change this constraint by conservatively scanning this memory.
+// register files. Currently, we assume this contains only scalar data,
+// except for arm64, which conservatively scans this memory to enable
+// small-size memmoves using non-GP registers.
//
// For an architecture to support extended register state, it must provide a Go
// definition of an xRegState type for storing the state, and its asyncPreempt
@@ -20,6 +21,7 @@
package runtime
import (
+ "internal/abi"
"internal/runtime/sys"
"unsafe"
)
@@ -135,3 +137,33 @@
unlock(&xRegAlloc.lock)
}
}
+
+// xRegScan conservatively scans the extended register state.
+//
+// This must be called only by scanstack when it handles async preemption.
+func xRegScan(gp *g, gcw *gcWork, state *stackScanState) {
+ // The scan is only needed on architectures where pointers may appear in the extended register state.
+ if GOARCH != "arm64" {
+ return
+ }
+ // Regular async preemption always provides the extended register state.
+ if gp.xRegs.state == nil {
+ var u unwinder
+ for u.init(gp, 0); u.valid(); u.next() {
+ if u.frame.fn.valid() && u.frame.fn.funcID == abi.FuncID_debugCallV2 {
+ return
+ }
+ }
+ println("runtime: gp=", gp, ", goid=", gp.goid)
+ throw("gp.xRegs.state == nil on a scanstack attempt during async preemption")
+ }
+ b := uintptr(unsafe.Pointer(&gp.xRegs.state.regs))
+ n := uintptr(unsafe.Sizeof(gp.xRegs.state.regs))
+ if debugScanConservative {
+ print("begin scan xRegs of goroutine ", gp.goid, " at [", hex(b), ",", hex(b+n), ")\n")
+ }
+ scanConservative(b, n, nil, gcw, state)
+ if debugScanConservative {
+ print("end scan xRegs of goroutine ", gp.goid, "\n")
+ }
+}