Diffstat (limited to 'llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go')
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go | 129
1 file changed, 127 insertions(+), 2 deletions(-)
diff --git a/llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go b/llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go
index 624485d18bf..cbf5e9cfdef 100644
--- a/llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go
+++ b/llgo/third_party/gofrontend/libgo/go/runtime/mgc0.go
@@ -4,6 +4,8 @@
package runtime
+import "unsafe"
+
// Called from C. Returns the Go type *m.
func gc_m_ptr(ret *interface{}) {
*ret = (*m)(nil)
@@ -19,9 +21,132 @@ func gc_itab_ptr(ret *interface{}) {
*ret = (*itab)(nil)
}
-func timenow() (sec int64, nsec int32)
-
func gc_unixnanotime(now *int64) {
sec, nsec := timenow()
*now = sec*1e9 + int64(nsec)
}
+
+func freeOSMemory() {
+ gogc(2) // force GC and do eager sweep
+ onM(scavenge_m)
+}
+
+var poolcleanup func()
+
+func registerPoolCleanup(f func()) {
+ poolcleanup = f
+}
+
+func clearpools() {
+ // clear sync.Pools
+ if poolcleanup != nil {
+ poolcleanup()
+ }
+
+ for _, p := range &allp {
+ if p == nil {
+ break
+ }
+ // clear tinyalloc pool
+ if c := p.mcache; c != nil {
+ c.tiny = nil
+ c.tinysize = 0
+
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var sg, sgnext *sudog
+ for sg = c.sudogcache; sg != nil; sg = sgnext {
+ sgnext = sg.next
+ sg.next = nil
+ }
+ c.sudogcache = nil
+ }
+
+ // clear defer pools
+ for i := range p.deferpool {
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = p.deferpool[i]; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
+ }
+ p.deferpool[i] = nil
+ }
+ }
+}
+
+func gosweepone() uintptr
+func gosweepdone() bool
+
+func bgsweep() {
+ getg().issystem = true
+ for {
+ for gosweepone() != ^uintptr(0) {
+ sweep.nbgsweep++
+ Gosched()
+ }
+ lock(&gclock)
+ if !gosweepdone() {
+ // This can happen if a GC runs between
+ // gosweepone returning ^0 above
+ // and the lock being acquired.
+ unlock(&gclock)
+ continue
+ }
+ sweep.parked = true
+ goparkunlock(&gclock, "GC sweep wait")
+ }
+}
+
+// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
+// but if we do that, Go inserts a write barrier on *dst = src.
+//go:nosplit
+func writebarrierptr(dst *uintptr, src uintptr) {
+ *dst = src
+}
+
+//go:nosplit
+func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+}
+
+//go:nosplit
+func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ dst[2] = src[2]
+}
+
+//go:nosplit
+func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+}
+
+//go:nosplit
+func writebarrierfat2(dst *[2]uintptr, _ *byte, src [2]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+}
+
+//go:nosplit
+func writebarrierfat3(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ dst[2] = src[2]
+}
+
+//go:nosplit
+func writebarrierfat4(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ dst[2] = src[2]
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat(typ *_type, dst, src unsafe.Pointer) {
+ memmove(dst, src, typ.size)
+}
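
For context, the freeOSMemory and registerPoolCleanup hooks added above are not called directly by user code: sync.Pool registers its cleanup through registerPoolCleanup so that clearpools can drop cached objects at each collection, and runtime/debug.FreeOSMemory is the public entry point that reaches freeOSMemory. A minimal usage sketch follows, assuming only the standard runtime/debug API (the buffer size is arbitrary and only for illustration):

package main

import "runtime/debug"

func main() {
	// Allocate a large buffer, drop the reference, then ask the
	// runtime to return freed pages to the OS. FreeOSMemory forces
	// a collection with an eager sweep and then runs the scavenger,
	// i.e. the gogc(2) + scavenge_m sequence added in this diff.
	buf := make([]byte, 64<<20)
	_ = buf
	buf = nil
	debug.FreeOSMemory()
}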