Diffstat (limited to 'llgo/third_party/gofrontend/libgo/go/reflect')
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/all_test.go          | 261
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/export_test.go       |   8
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefunc.go          |  10
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefunc_ffi.go      |   2
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_386.go    |  13
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_amd64.go  |  11
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390.go   |  13
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390x.go  |  13
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/type.go              | 278
-rw-r--r--  llgo/third_party/gofrontend/libgo/go/reflect/value.go             | 593
10 files changed, 708 insertions, 494 deletions
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/all_test.go b/llgo/third_party/gofrontend/libgo/go/reflect/all_test.go
index 3e107795bdf..bda87867c74 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/all_test.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/all_test.go
@@ -679,7 +679,7 @@ var deepEqualTests = []DeepEqualTest{
{1, nil, false},
{fn1, fn3, false},
{fn3, fn3, false},
- {[][]int{[]int{1}}, [][]int{[]int{2}}, false},
+ {[][]int{{1}}, [][]int{{2}}, false},
// Nil vs empty: not the same.
{[]int{}, []int(nil), false},
@@ -2507,10 +2507,21 @@ func TestAllocations(t *testing.T) {
noAlloc(t, 100, func(j int) {
var i interface{}
var v Value
- i = 42 + j
+
+ // We can uncomment this when compiler escape analysis
+ // is good enough to see that the integer assigned to i
+ // does not escape and therefore need not be allocated.
+ //
+ // i = 42 + j
+ // v = ValueOf(i)
+ // if int(v.Int()) != 42+j {
+ // panic("wrong int")
+ // }
+
+ i = func(j int) int { return j }
v = ValueOf(i)
- if int(v.Int()) != 42+j {
- panic("wrong int")
+ if v.Interface().(func(int) int)(j) != j {
+ panic("wrong result")
}
})
}
@@ -2571,6 +2582,15 @@ func TestSlice(t *testing.T) {
if vs != s[3:5] {
t.Errorf("s.Slice(3, 5) = %q; expected %q", vs, s[3:5])
}
+
+ rv := ValueOf(&xs).Elem()
+ rv = rv.Slice(3, 4)
+ ptr2 := rv.Pointer()
+ rv = rv.Slice(5, 5)
+ ptr3 := rv.Pointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice(3,4).Slice3(5,5).Pointer() = %#x, want %#x", ptr3, ptr2)
+ }
}
func TestSlice3(t *testing.T) {
@@ -2609,6 +2629,15 @@ func TestSlice3(t *testing.T) {
s := "hello world"
rv = ValueOf(&s).Elem()
shouldPanic(func() { rv.Slice3(1, 2, 3) })
+
+ rv = ValueOf(&xs).Elem()
+ rv = rv.Slice3(3, 5, 7)
+ ptr2 := rv.Pointer()
+ rv = rv.Slice3(4, 4, 4)
+ ptr3 := rv.Pointer()
+ if ptr3 != ptr2 {
+ t.Errorf("xs.Slice3(3,5,7).Slice3(4,4,4).Pointer() = %#x, want %#x", ptr3, ptr2)
+ }
}
func TestSetLenCap(t *testing.T) {
@@ -2667,6 +2696,26 @@ func TestFuncArg(t *testing.T) {
}
}
+func TestStructArg(t *testing.T) {
+ type padded struct {
+ B string
+ C int32
+ }
+ var (
+ gotA padded
+ gotB uint32
+ wantA = padded{"3", 4}
+ wantB = uint32(5)
+ )
+ f := func(a padded, b uint32) {
+ gotA, gotB = a, b
+ }
+ ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
+ if gotA != wantA || gotB != wantB {
+ t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
+ }
+}
+
var tagGetTests = []struct {
Tag StructTag
Key string
@@ -3244,6 +3293,44 @@ func TestConvert(t *testing.T) {
}
}
+type ComparableStruct struct {
+ X int
+}
+
+type NonComparableStruct struct {
+ X int
+ Y map[string]int
+}
+
+var comparableTests = []struct {
+ typ Type
+ ok bool
+}{
+ {TypeOf(1), true},
+ {TypeOf("hello"), true},
+ {TypeOf(new(byte)), true},
+ {TypeOf((func())(nil)), false},
+ {TypeOf([]byte{}), false},
+ {TypeOf(map[string]int{}), false},
+ {TypeOf(make(chan int)), true},
+ {TypeOf(1.5), true},
+ {TypeOf(false), true},
+ {TypeOf(1i), true},
+ {TypeOf(ComparableStruct{}), true},
+ {TypeOf(NonComparableStruct{}), false},
+ {TypeOf([10]map[string]int{}), false},
+ {TypeOf([10]string{}), true},
+ {TypeOf(new(interface{})).Elem(), true},
+}
+
+func TestComparable(t *testing.T) {
+ for _, tt := range comparableTests {
+ if ok := tt.typ.Comparable(); ok != tt.ok {
+ t.Errorf("TypeOf(%v).Comparable() = %v, want %v", tt.typ, ok, tt.ok)
+ }
+ }
+}
+
func TestOverflow(t *testing.T) {
if ovf := V(float64(0)).OverflowFloat(1e300); ovf {
t.Errorf("%v wrongly overflows float64", 1e300)
@@ -3290,6 +3377,9 @@ func checkSameType(t *testing.T, x, y interface{}) {
}
func TestArrayOf(t *testing.T) {
+ // TODO(rsc): Finish ArrayOf and enable the test.
+ t.Skip("ArrayOf is not finished (and not exported)")
+
// check construction and use of type not in binary
type T int
at := ArrayOf(10, TypeOf(T(1)))
@@ -3911,3 +4001,166 @@ func TestCallMethodJump(t *testing.T) {
// Stop garbage collecting during reflect.call.
*CallGC = false
}
+
+func TestMakeFuncStackCopy(t *testing.T) {
+ target := func(in []Value) []Value {
+ runtime.GC()
+ useStack(16)
+ return []Value{ValueOf(9)}
+ }
+
+ var concrete func(*int, int) int
+ fn := MakeFunc(ValueOf(concrete).Type(), target)
+ ValueOf(&concrete).Elem().Set(fn)
+ x := concrete(nil, 7)
+ if x != 9 {
+ t.Errorf("have %#q want 9", x)
+ }
+}
+
+// use about n KB of stack
+func useStack(n int) {
+ if n == 0 {
+ return
+ }
+ var b [1024]byte // makes frame about 1KB
+ useStack(n - 1 + int(b[99]))
+}
+
+type Impl struct{}
+
+func (Impl) f() {}
+
+func TestValueString(t *testing.T) {
+ rv := ValueOf(Impl{})
+ if rv.String() != "<reflect_test.Impl Value>" {
+ t.Errorf("ValueOf(Impl{}).String() = %q, want %q", rv.String(), "<reflect_test.Impl Value>")
+ }
+
+ method := rv.Method(0)
+ if method.String() != "<func() Value>" {
+ t.Errorf("ValueOf(Impl{}).Method(0).String() = %q, want %q", method.String(), "<func() Value>")
+ }
+}
+
+func TestInvalid(t *testing.T) {
+ // Used to have inconsistency between IsValid() and Kind() != Invalid.
+ type T struct{ v interface{} }
+
+ v := ValueOf(T{}).Field(0)
+ if v.IsValid() != true || v.Kind() != Interface {
+ t.Errorf("field: IsValid=%v, Kind=%v, want true, Interface", v.IsValid(), v.Kind())
+ }
+ v = v.Elem()
+ if v.IsValid() != false || v.Kind() != Invalid {
+ t.Errorf("field elem: IsValid=%v, Kind=%v, want false, Invalid", v.IsValid(), v.Kind())
+ }
+}
+
+// Issue 8917.
+func TestLargeGCProg(t *testing.T) {
+ fv := ValueOf(func([256]*byte) {})
+ fv.Call([]Value{ValueOf([256]*byte{})})
+}
+
+// Issue 9179.
+func TestCallGC(t *testing.T) {
+ f := func(a, b, c, d, e string) {
+ }
+ g := func(in []Value) []Value {
+ runtime.GC()
+ return nil
+ }
+ typ := ValueOf(f).Type()
+ f2 := MakeFunc(typ, g).Interface().(func(string, string, string, string, string))
+ f2("four", "five5", "six666", "seven77", "eight888")
+}
+
+type funcLayoutTest struct {
+ rcvr, t Type
+ argsize, retOffset uintptr
+ stack []byte
+}
+
+var funcLayoutTests []funcLayoutTest
+
+func init() {
+ var argAlign = PtrSize
+ if runtime.GOARCH == "amd64p32" {
+ argAlign = 2 * PtrSize
+ }
+ roundup := func(x uintptr, a uintptr) uintptr {
+ return (x + a - 1) / a * a
+ }
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a, b string) string { return "" }).Type(),
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{BitsPointer, BitsScalar, BitsPointer},
+ })
+
+ var r []byte
+ if PtrSize == 4 {
+ r = []byte{BitsScalar, BitsScalar, BitsScalar, BitsPointer}
+ } else {
+ r = []byte{BitsScalar, BitsScalar, BitsPointer}
+ }
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
+ roundup(3*4, PtrSize) + PtrSize + 2,
+ roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign),
+ r,
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(),
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{BitsPointer, BitsScalar, BitsPointer, BitsPointer},
+ })
+
+ type S struct {
+ a, b uintptr
+ c, d *byte
+ }
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ nil,
+ ValueOf(func(a S) {}).Type(),
+ 4 * PtrSize,
+ 4 * PtrSize,
+ []byte{BitsScalar, BitsScalar, BitsPointer, BitsPointer},
+ })
+
+ funcLayoutTests = append(funcLayoutTests,
+ funcLayoutTest{
+ ValueOf((*byte)(nil)).Type(),
+ ValueOf(func(a uintptr, b *int) {}).Type(),
+ 3 * PtrSize,
+ roundup(3*PtrSize, argAlign),
+ []byte{BitsPointer, BitsScalar, BitsPointer},
+ })
+}
+
+func TestFuncLayout(t *testing.T) {
+ t.Skip("gccgo does not use funcLayout")
+ for _, lt := range funcLayoutTests {
+ _, argsize, retOffset, stack := FuncLayout(lt.t, lt.rcvr)
+ if argsize != lt.argsize {
+ t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.t, lt.rcvr, argsize, lt.argsize)
+ }
+ if retOffset != lt.retOffset {
+ t.Errorf("funcLayout(%v, %v).retOffset=%d, want %d", lt.t, lt.rcvr, retOffset, lt.retOffset)
+ }
+ if !bytes.Equal(stack, lt.stack) {
+ t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.t, lt.rcvr, stack, lt.stack)
+ }
+ }
+}
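
The TestAllocations hunk above swaps the integer check for a func-value check: boxing an int into an interface{} still heap-allocates (ints are not pointer-shaped), while a func value is stored in the interface word directly. A minimal sketch of the same measurement outside the test harness — the 42+j expression and the closure mirror the test, and the exact counts depend on the compiler version, so treat the expected values as typical rather than guaranteed:

    package main

    import (
    	"fmt"
    	"reflect"
    	"testing"
    )

    func main() {
    	j := 1000 // runtime value, so the compiler cannot fold the box away
    	ints := testing.AllocsPerRun(100, func() {
    		var i interface{} = 42 + j // boxes the int: typically 1 allocation
    		_ = reflect.ValueOf(i)
    	})
    	f := func(j int) int { return j }
    	funcs := testing.AllocsPerRun(100, func() {
    		var i interface{} = f // func values are pointer-shaped: typically 0
    		_ = reflect.ValueOf(i)
    	})
    	fmt.Println(ints, funcs)
    }
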
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/export_test.go b/llgo/third_party/gofrontend/libgo/go/reflect/export_test.go
index 0778ad37f5c..49c45e82b2e 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/export_test.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/export_test.go
@@ -17,3 +17,11 @@ func IsRO(v Value) bool {
var ArrayOf = arrayOf
var CallGC = &callGC
+
+const PtrSize = ptrSize
+const BitsPointer = bitsPointer
+const BitsScalar = bitsScalar
+
+func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte) {
+ return
+}
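
An export_test.go file compiles only as part of the package's own test build, so it can re-export internals — here ptrSize, the GC bit codes, and a FuncLayout shim that gccgo leaves as a zero-returning stub — to the black-box reflect_test package without widening the public API. A generic sketch of the idiom, with illustrative names:

    // mylib/mylib.go
    package mylib

    const ptrSize = 8 // unexported implementation detail

    func arrayOf(n int, s string) string { return s } // unexported helper

    // mylib/export_test.go — linked only into "go test mylib", so these
    // re-exports never become part of the public API.
    package mylib

    const PtrSize = ptrSize

    var ArrayOf = arrayOf
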
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefunc.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefunc.go
index eb4589c6ce9..276be26108f 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefunc.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefunc.go
@@ -79,7 +79,7 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
ffi: ffi,
}
- return Value{t, unsafe.Pointer(&impl), flag(Func<<flagKindShift) | flagIndir}
+ return Value{t, unsafe.Pointer(&impl), flag(Func) | flagIndir}
}
// makeFuncStub is an assembly function that is the code half of
@@ -103,8 +103,8 @@ func makeMethodValue(op string, v Value) Value {
// Ignoring the flagMethod bit, v describes the receiver, not the method type.
fl := v.flag & (flagRO | flagAddr | flagIndir)
- fl |= flag(v.typ.Kind()) << flagKindShift
- rcvr := Value{v.typ, v.ptr /* v.scalar, */, fl}
+ fl |= flag(v.typ.Kind())
+ rcvr := Value{v.typ, v.ptr, fl}
// v.Type returns the actual type of the method value.
ft := v.Type().(*rtype)
@@ -134,7 +134,7 @@ func makeMethodValue(op string, v Value) Value {
fv.code, fv.ffi = makeFuncFFI(ftyp, fv.call)
}
- return Value{ft, unsafe.Pointer(&fv), v.flag&flagRO | flag(Func)<<flagKindShift | flagIndir}
+ return Value{ft, unsafe.Pointer(&fv), v.flag&flagRO | flag(Func) | flagIndir}
}
// makeValueMethod takes a method function and returns a function that
@@ -169,7 +169,7 @@ func makeValueMethod(v Value) Value {
impl.code, impl.ffi = makeFuncFFI(ftyp, impl.call)
}
- return Value{t, unsafe.Pointer(&impl), flag(Func<<flagKindShift) | flagIndir}
+ return Value{t, unsafe.Pointer(&impl), v.flag&flagRO | flag(Func) | flagIndir}
}
// Call the function represented by a makeFuncImpl.
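
Every flag(K)<<flagKindShift in this file collapses to flag(K) because the commit moves the Kind into the low five bits of the flag word (see the constants hunk in value.go below); the makeValueMethod hunk also starts propagating the caller's read-only bit via v.flag&flagRO. A standalone sketch of the new layout, mirroring the repacked constants:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    // Kind in bits 0-4, the five flag bits above it, method index from bit 10.
    type flag uintptr

    const (
    	flagKindWidth      = 5 // there are 27 kinds
    	flagKindMask  flag = 1<<flagKindWidth - 1
    	flagRO        flag = 1 << 5
    	flagIndir     flag = 1 << 6
    	flagAddr      flag = 1 << 7
    	flagMethod    flag = 1 << 8
    	flagMethodFn  flag = 1 << 9
    	flagMethodShift    = 10
    )

    func (f flag) kind() reflect.Kind { return reflect.Kind(f & flagKindMask) }

    func main() {
    	fl := flag(reflect.Func) | flagIndir | flagRO
    	fmt.Println(fl.kind())             // func: no shift needed any more
    	fmt.Println(fl&flagIndir != 0)     // true: flags live above bit 4
    	fmt.Println(fl >> flagMethodShift) // 0: no method index set
    }
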
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefunc_ffi.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefunc_ffi.go
index a13ef179f5d..40c1ea80fbe 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefunc_ffi.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefunc_ffi.go
@@ -56,7 +56,7 @@ func ffiCall(ftyp *funcType, fn func([]Value) []Value, params unsafe.Pointer, re
for _, rt := range ftyp.in {
p := unsafe_New(rt)
memmove(p, *(*unsafe.Pointer)(ap), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap = (unsafe.Pointer)(uintptr(ap) + ptrSize)
}
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_386.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_386.go
index 7809fb01f23..c20f0ac3b3e 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_386.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_386.go
@@ -75,7 +75,7 @@ func MakeFuncStubGo(regs *i386Regs, c *makeFuncImpl) {
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
}
@@ -128,15 +128,12 @@ func MakeFuncStubGo(regs *i386Regs, c *makeFuncImpl) {
v := out[0]
switch v.Kind() {
- case Ptr, UnsafePointer:
+ case Ptr, UnsafePointer, Chan, Func, Map:
regs.eax = uint32(uintptr(v.pointer()))
- case Float32:
- regs.st0 = float64(*(*float32)(v.ptr))
- regs.sf = true
- case Float64:
- regs.st0 = *(*float64)(v.ptr)
+ case Float32, Float64:
+ regs.st0 = v.Float()
regs.sf = true
default:
- regs.eax = uint32(loadScalar(v.ptr, v.typ.size))
+ memmove(unsafe.Pointer(&regs.eax), v.ptr, v.typ.size)
}
}
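
Two things change in the 386 return path: Chan, Func, and Map join Ptr and UnsafePointer as pointer-shaped kinds returned via v.pointer(), and the removed loadScalar size-switch becomes a plain memmove of v.typ.size bytes into the register image. A sketch of why the memmove is equivalent for any scalar that fits in a register, assuming little-endian layout as on 386:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	var eax uint32   // stand-in for regs.eax
    	val := int16(-7) // a 2-byte scalar return value
    	size := unsafe.Sizeof(val)

    	// stand-in for memmove(unsafe.Pointer(&regs.eax), v.ptr, v.typ.size):
    	// copy only size bytes; the upper bytes of the register are don't-cares.
    	copy((*[4]byte)(unsafe.Pointer(&eax))[:size],
    		(*[2]byte)(unsafe.Pointer(&val))[:])

    	fmt.Println(int16(eax)) // -7: the value sits in the low bytes of eax
    }
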
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_amd64.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_amd64.go
index 7118951d1fd..a236aa26795 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_amd64.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_amd64.go
@@ -224,7 +224,7 @@ argloop:
for _, rt := range ftyp.in {
c1, c2 := amd64Classify(rt)
- fl := flag(rt.Kind()) << flagKindShift
+ fl := flag(rt.Kind())
if c2 == amd64NoClass {
// Argument is passed in a single register or
@@ -364,10 +364,11 @@ argloop:
if len(out) == 1 && ret2 == amd64NoClass {
v := out[0]
var w unsafe.Pointer
- if v.Kind() == Ptr || v.Kind() == UnsafePointer {
+ switch v.Kind() {
+ case Ptr, UnsafePointer, Chan, Func, Map:
w = v.pointer()
- } else {
- w = unsafe.Pointer(loadScalar(v.ptr, v.typ.size))
+ default:
+ memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
}
switch ret1 {
case amd64Integer:
@@ -439,7 +440,7 @@ func amd64Memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
return in, ap
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390.go
index ff22add81a1..47daa77f6bb 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390.go
@@ -232,7 +232,7 @@ func S390MakeFuncStubGo(regs *s390_regs, c *makeFuncImpl) {
argloop:
for _, rt := range ftyp.in {
class, off_reg, off_slot := s390ClassifyParameter(rt)
- fl := flag(rt.Kind()) << flagKindShift
+ fl := flag(rt.Kind())
switch class {
case s390_empty:
v := Value{rt, nil, fl | flagIndir}
@@ -338,10 +338,11 @@ argloop:
// Single return value in a general or floating point register.
v := out[0]
var w uintptr
- if v.Kind() == Ptr || v.Kind() == UnsafePointer {
+ switch v.Kind() {
+ case Ptr, UnsafePointer, Chan, Func, Map:
w = uintptr(v.pointer())
- } else {
- w = uintptr(loadScalar(v.ptr, v.typ.size))
+ default:
+ memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
if ret_off_reg != 0 {
w = s390ReloadForRegister(
ret_type, w, ret_off_reg)
@@ -394,7 +395,7 @@ func s390_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Val
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
ap = align(ap, s390_arch_stack_slot_align)
@@ -413,7 +414,7 @@ func s390_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
p := unsafe_New(rt)
memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += s390_arch_stack_slot_align
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390x.go b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390x.go
index a0a5567f3b9..6e510156b34 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390x.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/makefuncgo_s390x.go
@@ -226,7 +226,7 @@ func S390xMakeFuncStubGo(regs *s390x_regs, c *makeFuncImpl) {
argloop:
for _, rt := range ftyp.in {
class, off_reg, off_slot := s390xClassifyParameter(rt)
- fl := flag(rt.Kind()) << flagKindShift
+ fl := flag(rt.Kind())
switch class {
case s390x_empty:
v := Value{rt, nil, fl | flagIndir}
@@ -317,10 +317,11 @@ argloop:
// Single return value in a general or floating point register.
v := out[0]
var w uintptr
- if v.Kind() == Ptr || v.Kind() == UnsafePointer {
+ switch v.Kind() {
+ case Ptr, UnsafePointer, Chan, Func, Map:
w = uintptr(v.pointer())
- } else {
- w = uintptr(loadScalar(v.ptr, v.typ.size))
+ default:
+ memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
if ret_off_reg != 0 {
w = s390xReloadForRegister(
ret_type, w, ret_off_reg)
@@ -370,7 +371,7 @@ func s390x_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Va
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
ap = align(ap, s390x_arch_stack_slot_align)
@@ -389,7 +390,7 @@ func s390x_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
p := unsafe_New(rt)
memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size)
- v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
+ v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += s390x_arch_stack_slot_align
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/type.go b/llgo/third_party/gofrontend/libgo/go/reflect/type.go
index 91697c4b56b..101135410cf 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/type.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/type.go
@@ -99,6 +99,9 @@ type Type interface {
// ConvertibleTo returns true if a value of the type is convertible to type u.
ConvertibleTo(u Type) bool
+ // Comparable returns true if values of this type are comparable.
+ Comparable() bool
+
// Methods applicable only to some types, depending on Kind.
// The methods allowed for each kind are:
//
@@ -249,7 +252,7 @@ type rtype struct {
align int8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
_ uint8 // unused/padding
- size uintptr // size in bytes
+ size uintptr
hash uint32 // hash of type; avoids computation in hash tables
hashfn uintptr // hash function code
@@ -331,8 +334,6 @@ type mapType struct {
rtype `reflect:"map"`
key *rtype // map key type
elem *rtype // map element (value) type
- // bucket *rtype // internal bucket structure
- // hmap *rtype // internal map header
}
// ptrType represents a pointer type.
@@ -401,11 +402,11 @@ type Method struct {
Index int // index for Type.Method
}
-// High bit says whether type has
-// embedded pointers,to help garbage collector.
const (
- kindMask = 0x7f
- kindNoPointers = 0x80
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6 // Type.gc points to GC program
+ kindNoPointers = 1 << 7
+ kindMask = (1 << 5) - 1
)
func (k Kind) String() string {
@@ -513,7 +514,7 @@ func (t *uncommonType) Method(i int) (m Method) {
if p.name != nil {
m.Name = *p.name
}
- fl := flag(Func) << flagKindShift
+ fl := flag(Func)
if p.pkgPath != nil {
m.PkgPath = *p.pkgPath
fl |= flagRO
@@ -522,7 +523,7 @@ func (t *uncommonType) Method(i int) (m Method) {
m.Type = toType(mt)
x := new(unsafe.Pointer)
*x = unsafe.Pointer(&p.tfn)
- m.Func = Value{mt, unsafe.Pointer(x) /* 0, */, fl | flagIndir | flagMethodFn}
+ m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
m.Index = i
return
}
@@ -1146,7 +1147,6 @@ func (t *rtype) ptrTo() *rtype {
q := canonicalize(&p.rtype)
p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
- ptrMap.m[t] = p
ptrMap.Unlock()
return &p.rtype
}
@@ -1185,6 +1185,34 @@ func (t *rtype) ConvertibleTo(u Type) bool {
return convertOp(uu, t) != nil
}
+func (t *rtype) Comparable() bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64,
+ Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
+ Float32, Float64, Complex64, Complex128,
+ Chan, Interface, Ptr, String, UnsafePointer:
+ return true
+
+ case Func, Map, Slice:
+ return false
+
+ case Array:
+ return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
+
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for i := range tt.fields {
+ if !tt.fields[i].typ.Comparable() {
+ return false
+ }
+ }
+ return true
+
+ default:
+ panic("reflect: impossible")
+ }
+}
+
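
Comparable follows the spec's comparability rules recursively — arrays inherit from their element type, structs from every field — which is exactly what the comparableTests table added to all_test.go exercises. For instance:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	fmt.Println(reflect.TypeOf(1).Comparable())                    // true
    	fmt.Println(reflect.TypeOf([]byte(nil)).Comparable())          // false: slices never compare
    	fmt.Println(reflect.TypeOf([10]string{}).Comparable())         // true: comparable element type
    	fmt.Println(reflect.TypeOf([10]map[string]int{}).Comparable()) // false: map elements
    	type pair struct {
    		X int
    		Y map[string]int
    	}
    	fmt.Println(reflect.TypeOf(pair{}).Comparable()) // false: field Y is a map
    }
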
// implements returns true if the type V implements the interface type T.
func implements(T, V *rtype) bool {
if T.Kind() != Interface {
@@ -1419,11 +1447,6 @@ type chanGC struct {
end uintptr // _GC_END
}
-type badGC struct {
- width uintptr
- end uintptr
-}
-
// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
@@ -1536,8 +1559,6 @@ func MapOf(key, elem Type) Type {
mt.key = ktyp
mt.elem = etyp
- // mt.bucket = bucketOf(ktyp, etyp)
- // mt.hmap = hMapOf(mt.bucket)
mt.uncommonType = nil
mt.ptrToThis = nil
mt.zero = unsafe.Pointer(&make([]byte, mt.size)[0])
@@ -1559,57 +1580,151 @@ func MapOf(key, elem Type) Type {
return cachePut(ckey, &mt.rtype)
}
-// Make sure these routines stay in sync with ../../pkg/runtime/hashmap.c!
+// gcProg is a helper type for generation of GC pointer info.
+type gcProg struct {
+ gc []byte
+ size uintptr // size of type in bytes
+}
+
+func (gc *gcProg) append(v byte) {
+ gc.align(unsafe.Sizeof(uintptr(0)))
+ gc.appendWord(v)
+}
+
+// Appends t's type info to the current program.
+func (gc *gcProg) appendProg(t *rtype) {
+ gc.align(uintptr(t.align))
+ if !t.pointers() {
+ gc.size += t.size
+ return
+ }
+ switch t.Kind() {
+ default:
+ panic("reflect: non-pointer type marked as having pointers")
+ case Ptr, UnsafePointer, Chan, Func, Map:
+ gc.appendWord(bitsPointer)
+ case Slice:
+ gc.appendWord(bitsPointer)
+ gc.appendWord(bitsScalar)
+ gc.appendWord(bitsScalar)
+ case String:
+ gc.appendWord(bitsPointer)
+ gc.appendWord(bitsScalar)
+ case Array:
+ c := t.Len()
+ e := t.Elem().common()
+ for i := 0; i < c; i++ {
+ gc.appendProg(e)
+ }
+ case Interface:
+ gc.appendWord(bitsMultiWord)
+ if t.NumMethod() == 0 {
+ gc.appendWord(bitsEface)
+ } else {
+ gc.appendWord(bitsIface)
+ }
+ case Struct:
+ c := t.NumField()
+ for i := 0; i < c; i++ {
+ gc.appendProg(t.Field(i).Type.common())
+ }
+ gc.align(uintptr(t.align))
+ }
+}
+
+func (gc *gcProg) appendWord(v byte) {
+ ptrsize := unsafe.Sizeof(uintptr(0))
+ if gc.size%ptrsize != 0 {
+ panic("reflect: unaligned GC program")
+ }
+ nptr := gc.size / ptrsize
+ for uintptr(len(gc.gc)) < nptr/2+1 {
+ gc.gc = append(gc.gc, 0x44) // BitsScalar
+ }
+ gc.gc[nptr/2] &= ^(3 << ((nptr%2)*4 + 2))
+ gc.gc[nptr/2] |= v << ((nptr%2)*4 + 2)
+ gc.size += ptrsize
+}
+
+func (gc *gcProg) finalize() unsafe.Pointer {
+ if gc.size == 0 {
+ return nil
+ }
+ ptrsize := unsafe.Sizeof(uintptr(0))
+ gc.align(ptrsize)
+ nptr := gc.size / ptrsize
+ for uintptr(len(gc.gc)) < nptr/2+1 {
+ gc.gc = append(gc.gc, 0x44) // BitsScalar
+ }
+ // If number of words is odd, repeat the mask twice.
+ // Compiler does the same.
+ if nptr%2 != 0 {
+ for i := uintptr(0); i < nptr; i++ {
+ gc.appendWord(extractGCWord(gc.gc, i))
+ }
+ }
+ return unsafe.Pointer(&gc.gc[0])
+}
+
+func extractGCWord(gc []byte, i uintptr) byte {
+ return (gc[i/2] >> ((i%2)*4 + 2)) & 3
+}
+
+func (gc *gcProg) align(a uintptr) {
+ gc.size = align(gc.size, a)
+}
+
+// These constants must stay in sync with ../runtime/mgc0.h.
+const (
+ bitsScalar = 1
+ bitsPointer = 2
+ bitsMultiWord = 3
+
+ bitsIface = 2
+ bitsEface = 3
+)
+
+// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
- _BUCKETSIZE = 8
- _MAXKEYSIZE = 128
- _MAXVALSIZE = 128
+ bucketSize = 8
+ maxKeySize = 128
+ maxValSize = 128
)
func bucketOf(ktyp, etyp *rtype) *rtype {
- if ktyp.size > _MAXKEYSIZE {
+ if ktyp.size > maxKeySize {
ktyp = PtrTo(ktyp).(*rtype)
}
- if etyp.size > _MAXVALSIZE {
+ if etyp.size > maxValSize {
etyp = PtrTo(etyp).(*rtype)
}
ptrsize := unsafe.Sizeof(uintptr(0))
- gc := make([]uintptr, 1) // first entry is size, filled in at the end
- offset := _BUCKETSIZE * unsafe.Sizeof(uint8(0)) // topbits
- gc = append(gc, _GC_PTR, offset, 0 /*self pointer set below*/) // overflow
- offset += ptrsize
-
+ var gc gcProg
+ // topbits
+ for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrsize); i++ {
+ gc.append(bitsScalar)
+ }
+ gc.append(bitsPointer) // overflow
if runtime.GOARCH == "amd64p32" {
- offset += 4
+ gc.append(bitsScalar)
}
-
// keys
- if ktyp.kind&kindNoPointers == 0 {
- gc = append(gc, _GC_ARRAY_START, offset, _BUCKETSIZE, ktyp.size)
- gc = appendGCProgram(gc, ktyp)
- gc = append(gc, _GC_ARRAY_NEXT)
+ for i := 0; i < bucketSize; i++ {
+ gc.appendProg(ktyp)
}
- offset += _BUCKETSIZE * ktyp.size
-
// values
- if etyp.kind&kindNoPointers == 0 {
- gc = append(gc, _GC_ARRAY_START, offset, _BUCKETSIZE, etyp.size)
- gc = appendGCProgram(gc, etyp)
- gc = append(gc, _GC_ARRAY_NEXT)
+ for i := 0; i < bucketSize; i++ {
+ gc.appendProg(etyp)
}
- offset += _BUCKETSIZE * etyp.size
-
- gc = append(gc, _GC_END)
- gc[0] = offset
- gc[3] = uintptr(unsafe.Pointer(&gc[0])) // set self pointer
b := new(rtype)
- b.size = offset
- // b.gc = unsafe.Pointer(&gc[0])
+ b.size = gc.size
+ // b.gc[0] = gc.finalize()
+ b.kind |= kindGCProg
s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
b.string = &s
return b
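
appendWord stores one two-bit GC code per pointer-sized word, two codes per byte, at bit offsets 2 and 6 — the pre-fill byte 0x44 (binary 0100 0100) is simply bitsScalar written into both slots, and extractGCWord reads the same positions back. A worked example of the packing (same arithmetic as above, with &^= in place of the &= ^(...) form):

    package main

    import "fmt"

    const (
    	bitsScalar  = 1 // 01
    	bitsPointer = 2 // 10
    )

    // Entry nptr lives in byte nptr/2, at bit offset (nptr%2)*4 + 2.
    func appendWord(gc []byte, nptr uintptr, v byte) []byte {
    	for uintptr(len(gc)) < nptr/2+1 {
    		gc = append(gc, 0x44) // 0100 0100: both slots pre-set to bitsScalar
    	}
    	gc[nptr/2] &^= 3 << ((nptr%2)*4 + 2) // clear the slot
    	gc[nptr/2] |= v << ((nptr%2)*4 + 2)  // write the 2-bit code
    	return gc
    }

    func extractGCWord(gc []byte, i uintptr) byte {
    	return (gc[i/2] >> ((i%2)*4 + 2)) & 3
    }

    func main() {
    	// Describe a three-word object laid out pointer, scalar, pointer.
    	var gc []byte
    	for i, v := range []byte{bitsPointer, bitsScalar, bitsPointer} {
    		gc = appendWord(gc, uintptr(i), v)
    	}
    	fmt.Printf("% x -> %d %d %d\n", gc,
    		extractGCWord(gc, 0), extractGCWord(gc, 1), extractGCWord(gc, 2))
    	// Output: 48 48 -> 2 1 2
    }
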
@@ -1756,6 +1871,8 @@ func SliceOf(t Type) Type {
//
// TODO(rsc): Unexported for now. Export once the alg field is set correctly
// for the type. This may require significant work.
+//
+// TODO(rsc): TestArrayOf is also disabled. Re-enable.
func arrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
slice := SliceOf(elem)
@@ -1774,6 +1891,7 @@ func arrayOf(count int, elem Type) Type {
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := new(arrayType)
*array = *prototype
+ // TODO: Set extra kind bits correctly.
array.string = &s
// gccgo uses a different hash.
@@ -1794,6 +1912,7 @@ func arrayOf(count int, elem Type) Type {
array.fieldAlign = typ.fieldAlign
// TODO: array.alg
// TODO: array.gc
+ // TODO:
array.uncommonType = nil
array.ptrToThis = nil
array.zero = unsafe.Pointer(&make([]byte, array.size)[0])
@@ -1845,3 +1964,68 @@ func toType(p *rtype) Type {
}
return canonicalize(p)
}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *rtype) bool {
+ return t.kind&kindDirectIface == 0
+}
+
+// Layout matches runtime.BitVector (well enough).
+type bitVector struct {
+ n uint32 // number of bits
+ data []byte
+}
+
+// append a bit pair to the bitmap.
+func (bv *bitVector) append2(bits uint8) {
+ // assume bv.n is a multiple of 2, since append2 is the only operation.
+ if bv.n%8 == 0 {
+ bv.data = append(bv.data, 0)
+ }
+ bv.data[bv.n/8] |= bits << (bv.n % 8)
+ bv.n += 2
+}
+
+func addTypeBits(bv *bitVector, offset *uintptr, t *rtype) {
+ *offset = align(*offset, uintptr(t.align))
+ if t.kind&kindNoPointers != 0 {
+ *offset += t.size
+ return
+ }
+
+ switch Kind(t.kind & kindMask) {
+ case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
+ // 1 pointer at start of representation
+ for bv.n < 2*uint32(*offset/uintptr(ptrSize)) {
+ bv.append2(bitsScalar)
+ }
+ bv.append2(bitsPointer)
+
+ case Interface:
+ // 2 pointers
+ for bv.n < 2*uint32(*offset/uintptr(ptrSize)) {
+ bv.append2(bitsScalar)
+ }
+ bv.append2(bitsPointer)
+ bv.append2(bitsPointer)
+
+ case Array:
+ // repeat inner type
+ tt := (*arrayType)(unsafe.Pointer(t))
+ for i := 0; i < int(tt.len); i++ {
+ addTypeBits(bv, offset, tt.elem)
+ }
+
+ case Struct:
+ // apply fields
+ tt := (*structType)(unsafe.Pointer(t))
+ start := *offset
+ for i := range tt.fields {
+ f := &tt.fields[i]
+ off := start + f.offset
+ addTypeBits(bv, &off, f.typ)
+ }
+ }
+
+ *offset += t.size
+}
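
The new kindDirectIface and kindGCProg bits shrink kindMask to five bits, and ifaceIndir(t) — t.kind&kindDirectIface == 0 — becomes the single test for "is this type boxed behind a pointer in an interface?" used throughout value.go below. addTypeBits then builds the runtime-style bit vector consumed by funcLayout: append2 packs one two-bit code per pointer word, four pairs per byte. A sketch reproducing the struct case from funcLayoutTests in all_test.go:

    package main

    import "fmt"

    const (
    	bitsScalar  = 1
    	bitsPointer = 2
    )

    // Mirror of bitVector.append2: two bits per pointer word, four pairs per byte.
    type bitVector struct {
    	n    uint32 // number of bits
    	data []byte
    }

    func (bv *bitVector) append2(bits uint8) {
    	if bv.n%8 == 0 {
    		bv.data = append(bv.data, 0)
    	}
    	bv.data[bv.n/8] |= bits << (bv.n % 8)
    	bv.n += 2
    }

    func main() {
    	// type S struct { a, b uintptr; c, d *byte } from funcLayoutTests:
    	// two scalar words followed by two pointer words.
    	var bv bitVector
    	for _, b := range []uint8{bitsScalar, bitsScalar, bitsPointer, bitsPointer} {
    		bv.append2(b)
    	}
    	for i := uint32(0); i < bv.n; i += 2 {
    		fmt.Print((bv.data[i/8]>>(i%8))&3, " ") // 1 1 2 2
    	}
    	fmt.Println()
    }
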
diff --git a/llgo/third_party/gofrontend/libgo/go/reflect/value.go b/llgo/third_party/gofrontend/libgo/go/reflect/value.go
index c36e9954427..09210b37b70 100644
--- a/llgo/third_party/gofrontend/libgo/go/reflect/value.go
+++ b/llgo/third_party/gofrontend/libgo/go/reflect/value.go
@@ -7,40 +7,12 @@ package reflect
import (
"math"
"runtime"
- "strconv"
"unsafe"
)
-const bigEndian = false // can be smarter if we find a big-endian machine
const ptrSize = unsafe.Sizeof((*byte)(nil))
const cannotSet = "cannot set value obtained from unexported struct field"
-// TODO: This will have to go away when
-// the new gc goes in.
-func memmove(adst, asrc unsafe.Pointer, n uintptr) {
- dst := uintptr(adst)
- src := uintptr(asrc)
- switch {
- case src < dst && src+n > dst:
- // byte copy backward
- // careful: i is unsigned
- for i := n; i > 0; {
- i--
- *(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
- }
- case (n|src|dst)&(ptrSize-1) != 0:
- // byte copy forward
- for i := uintptr(0); i < n; i++ {
- *(*byte)(unsafe.Pointer(dst + i)) = *(*byte)(unsafe.Pointer(src + i))
- }
- default:
- // word copy forward
- for i := uintptr(0); i < n; i += ptrSize {
- *(*uintptr)(unsafe.Pointer(dst + i)) = *(*uintptr)(unsafe.Pointer(src + i))
- }
- }
-}
-
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
@@ -64,16 +36,8 @@ type Value struct {
// Pointer-valued data or, if flagIndir is set, pointer to data.
// Valid when either flagIndir is set or typ.pointers() is true.
- // Gccgo always uses this field.
ptr unsafe.Pointer
- // Non-pointer-valued data. When the data is smaller
- // than a word, it begins at the first byte (in the memory
- // address sense) of this field.
- // Valid when flagIndir is not set and typ.pointers() is false.
- // Gccgo never uses this field.
- // scalar uintptr
-
// flag holds metadata about the value.
// The lowest bits are flag bits:
// - flagRO: obtained via unexported field, so read-only
@@ -84,7 +48,7 @@ type Value struct {
// This repeats typ.Kind() except for method values.
// The remaining 23+ bits give a method number for method values.
// If flag.kind() != Func, code can assume that flagMethod is unset.
- // If typ.size > ptrSize, code can assume that flagIndir is set.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
flag
// A method value represents a curried method invocation
@@ -97,19 +61,18 @@ type Value struct {
type flag uintptr
const (
- flagRO flag = 1 << iota
- flagIndir
- flagAddr
- flagMethod
- flagMethodFn // gccgo: first fn parameter is always pointer
- flagKindShift = iota
flagKindWidth = 5 // there are 27 kinds
flagKindMask flag = 1<<flagKindWidth - 1
- flagMethodShift = flagKindShift + flagKindWidth
+ flagRO flag = 1 << 5
+ flagIndir flag = 1 << 6
+ flagAddr flag = 1 << 7
+ flagMethod flag = 1 << 8
+ flagMethodFn flag = 1 << 9 // gccgo: first fn parameter is always pointer
+ flagMethodShift = 10
)
func (f flag) kind() Kind {
- return Kind((f >> flagKindShift) & flagKindMask)
+ return Kind(f & flagKindMask)
}
// pointer returns the underlying pointer represented by v.
@@ -131,11 +94,11 @@ func packEface(v Value) interface{} {
e := (*emptyInterface)(unsafe.Pointer(&i))
// First, fill in the data portion of the interface.
switch {
- case v.Kind() != Ptr && v.Kind() != UnsafePointer:
- // Value is indirect, and so is the interface we're making.
+ case ifaceIndir(t):
if v.flag&flagIndir == 0 {
- panic("reflect: missing flagIndir")
+ panic("bad indir")
}
+ // Value is indirect, and so is the interface we're making.
ptr := v.ptr
if v.flag&flagAddr != 0 {
// TODO: pass safe boolean from valueInterface so
@@ -144,23 +107,14 @@ func packEface(v Value) interface{} {
memmove(c, ptr, t.size)
ptr = c
}
- e.word = iword(ptr)
+ e.word = ptr
case v.flag&flagIndir != 0:
// Value is indirect, but interface is direct. We need
// to load the data at v.ptr into the interface data word.
- if t.pointers() {
- e.word = iword(*(*unsafe.Pointer)(v.ptr))
- } else {
- e.word = iword(loadScalar(v.ptr, t.size))
- }
+ e.word = *(*unsafe.Pointer)(v.ptr)
default:
// Value is direct, and so is the interface.
- if t.pointers() {
- e.word = iword(v.ptr)
- } else {
- // e.word = iword(v.scalar)
- panic("reflect: missing flagIndir")
- }
+ e.word = v.ptr
}
// Now, fill in the type portion. We're very careful here not
// to have any operation between the e.word and e.typ assignments
@@ -178,8 +132,8 @@ func unpackEface(i interface{}) Value {
if t == nil {
return Value{}
}
- f := flag(t.Kind()) << flagKindShift
- if t.Kind() != Ptr && t.Kind() != UnsafePointer {
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
f |= flagIndir
}
return Value{t, unsafe.Pointer(e.word), f}
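
packEface and unpackEface now key off ifaceIndir(t) rather than "is the Kind Ptr or UnsafePointer": pointer-shaped values live in the interface data word itself, everything else is boxed and the word points at a copy. A sketch of that split, assuming the gc/gccgo interface representation that matches the emptyInterface header defined later in this file:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    type emptyInterface struct {
    	typ  unsafe.Pointer
    	word unsafe.Pointer
    }

    func word(i interface{}) unsafe.Pointer {
    	return (*emptyInterface)(unsafe.Pointer(&i)).word
    }

    func main() {
    	p := new(int)
    	fmt.Println(word(p) == unsafe.Pointer(p)) // true: a *int is stored directly

    	n := 7
    	fmt.Println(*(*int)(word(n))) // 7: ints are boxed; word points at the copy
    }
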
@@ -211,78 +165,10 @@ func methodName() string {
return f.Name()
}
-// An iword is the word that would be stored in an
-// interface to represent a given value v. Specifically, if v is
-// bigger than a pointer, its word is a pointer to v's data.
-// Otherwise, its word holds the data stored
-// in its leading bytes (so is not a pointer).
-// This type is very dangerous for the garbage collector because
-// it must be treated conservatively. We try to never expose it
-// to the GC here so that GC remains precise.
-type iword unsafe.Pointer
-
-// loadScalar loads n bytes at p from memory into a uintptr
-// that forms the second word of an interface. The data
-// must be non-pointer in nature.
-func loadScalar(p unsafe.Pointer, n uintptr) uintptr {
- // Run the copy ourselves instead of calling memmove
- // to avoid moving w to the heap.
- var w uintptr
- switch n {
- default:
- panic("reflect: internal error: loadScalar of " + strconv.Itoa(int(n)) + "-byte value")
- case 0:
- case 1:
- *(*uint8)(unsafe.Pointer(&w)) = *(*uint8)(p)
- case 2:
- *(*uint16)(unsafe.Pointer(&w)) = *(*uint16)(p)
- case 3:
- *(*[3]byte)(unsafe.Pointer(&w)) = *(*[3]byte)(p)
- case 4:
- *(*uint32)(unsafe.Pointer(&w)) = *(*uint32)(p)
- case 5:
- *(*[5]byte)(unsafe.Pointer(&w)) = *(*[5]byte)(p)
- case 6:
- *(*[6]byte)(unsafe.Pointer(&w)) = *(*[6]byte)(p)
- case 7:
- *(*[7]byte)(unsafe.Pointer(&w)) = *(*[7]byte)(p)
- case 8:
- *(*uint64)(unsafe.Pointer(&w)) = *(*uint64)(p)
- }
- return w
-}
-
-// storeScalar stores n bytes from w into p.
-func storeScalar(p unsafe.Pointer, w uintptr, n uintptr) {
- // Run the copy ourselves instead of calling memmove
- // to avoid moving w to the heap.
- switch n {
- default:
- panic("reflect: internal error: storeScalar of " + strconv.Itoa(int(n)) + "-byte value")
- case 0:
- case 1:
- *(*uint8)(p) = *(*uint8)(unsafe.Pointer(&w))
- case 2:
- *(*uint16)(p) = *(*uint16)(unsafe.Pointer(&w))
- case 3:
- *(*[3]byte)(p) = *(*[3]byte)(unsafe.Pointer(&w))
- case 4:
- *(*uint32)(p) = *(*uint32)(unsafe.Pointer(&w))
- case 5:
- *(*[5]byte)(p) = *(*[5]byte)(unsafe.Pointer(&w))
- case 6:
- *(*[6]byte)(p) = *(*[6]byte)(unsafe.Pointer(&w))
- case 7:
- *(*[7]byte)(p) = *(*[7]byte)(unsafe.Pointer(&w))
- case 8:
- *(*uint64)(p) = *(*uint64)(unsafe.Pointer(&w))
- }
-}
-
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
typ *rtype
- word iword
+ word unsafe.Pointer
}
// nonEmptyInterface is the header for a interface value with methods.
@@ -292,7 +178,7 @@ type nonEmptyInterface struct {
typ *rtype // dynamic concrete type
fun [100000]unsafe.Pointer // method table
}
- word iword
+ word unsafe.Pointer
}
// mustBe panics if f's kind is not expected.
@@ -302,9 +188,8 @@ type nonEmptyInterface struct {
// v.flag.mustBe(Bool), which will only bother to copy the
// single important word for the receiver.
func (f flag) mustBe(expected Kind) {
- k := f.kind()
- if k != expected {
- panic(&ValueError{methodName(), k})
+ if f.kind() != expected {
+ panic(&ValueError{methodName(), f.kind()})
}
}
@@ -344,18 +229,14 @@ func (v Value) Addr() Value {
if v.flag&flagAddr == 0 {
panic("reflect.Value.Addr of unaddressable value")
}
- return Value{v.typ.ptrTo(), v.ptr /* 0, */, (v.flag & flagRO) | flag(Ptr)<<flagKindShift}
+ return Value{v.typ.ptrTo(), v.ptr, (v.flag & flagRO) | flag(Ptr)}
}
// Bool returns v's underlying value.
// It panics if v's kind is not Bool.
func (v Value) Bool() bool {
v.mustBe(Bool)
- if v.flag&flagIndir != 0 {
- return *(*bool)(v.ptr)
- }
- // return *(*bool)(unsafe.Pointer(&v.scalar))
- panic("reflect: missing flagIndir")
+ return *(*bool)(v.ptr)
}
// Bytes returns v's underlying value.
@@ -594,7 +475,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
i := methodIndex
if v.typ.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.methods) {
+ if uint(i) >= uint(len(tt.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &tt.methods[i]
@@ -611,7 +492,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
} else {
rcvrtype = v.typ
ut := v.typ.uncommon()
- if ut == nil || i < 0 || i >= len(ut.methods) {
+ if ut == nil || uint(i) >= uint(len(ut.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &ut.methods[i]
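
The recurring rewrite of i < 0 || i >= len(x) as uint(i) >= uint(len(x)) — here and in Field, Index, Method, and SetLen — folds both bounds checks into one comparison: a negative i wraps to a huge unsigned value and fails the same test that catches i >= len. For example:

    package main

    import "fmt"

    func main() {
    	methods := make([]string, 3)
    	for _, i := range []int{-1, 0, 2, 3} {
    		out := uint(i) >= uint(len(methods))
    		fmt.Println(i, out) // -1 true, 0 false, 2 false, 3 true
    	}
    }
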
@@ -634,19 +515,10 @@ func storeRcvr(v Value, p unsafe.Pointer) {
// the interface data word becomes the receiver word
iface := (*nonEmptyInterface)(v.ptr)
*(*unsafe.Pointer)(p) = unsafe.Pointer(iface.word)
- } else if v.flag&flagIndir != 0 {
- if t.size > ptrSize {
- *(*unsafe.Pointer)(p) = v.ptr
- } else if t.pointers() {
- *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
- } else {
- *(*uintptr)(p) = loadScalar(v.ptr, t.size)
- }
- } else if t.pointers() {
- *(*unsafe.Pointer)(p) = v.ptr
+ } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
+ *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
} else {
- // *(*uintptr)(p) = v.scalar
- panic("reflect: missing flagIndir")
+ *(*unsafe.Pointer)(p) = v.ptr
}
}
@@ -679,7 +551,7 @@ func (v Value) Cap() int {
// Slice is always bigger than a word; assume flagIndir.
return (*sliceHeader)(v.ptr).Cap
}
- panic(&ValueError{"reflect.Value.Cap", k})
+ panic(&ValueError{"reflect.Value.Cap", v.kind()})
}
// Close closes the channel v.
@@ -696,16 +568,11 @@ func (v Value) Complex() complex128 {
k := v.kind()
switch k {
case Complex64:
- if v.flag&flagIndir != 0 {
- return complex128(*(*complex64)(v.ptr))
- }
- // return complex128(*(*complex64)(unsafe.Pointer(&v.scalar)))
- panic("reflect: missing flagIndir")
+ return complex128(*(*complex64)(v.ptr))
case Complex128:
- // complex128 is always bigger than a word; assume flagIndir.
return *(*complex128)(v.ptr)
}
- panic(&ValueError{"reflect.Value.Complex", k})
+ panic(&ValueError{"reflect.Value.Complex", v.kind()})
}
// Elem returns the value that the interface v contains
@@ -725,7 +592,9 @@ func (v Value) Elem() Value {
})(v.ptr))
}
x := unpackEface(eface)
- x.flag |= v.flag & flagRO
+ if x.flag != 0 {
+ x.flag |= v.flag & flagRO
+ }
return x
case Ptr:
ptr := v.ptr
@@ -739,58 +608,46 @@ func (v Value) Elem() Value {
tt := (*ptrType)(unsafe.Pointer(v.typ))
typ := tt.elem
fl := v.flag&flagRO | flagIndir | flagAddr
- fl |= flag(typ.Kind() << flagKindShift)
- return Value{typ, ptr /* 0, */, fl}
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
}
- panic(&ValueError{"reflect.Value.Elem", k})
+ panic(&ValueError{"reflect.Value.Elem", v.kind()})
}
// Field returns the i'th field of the struct v.
// It panics if v's Kind is not Struct or i is out of range.
func (v Value) Field(i int) Value {
- v.mustBe(Struct)
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
tt := (*structType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.fields) {
+ if uint(i) >= uint(len(tt.fields)) {
panic("reflect: Field index out of range")
}
field := &tt.fields[i]
typ := field.typ
// Inherit permission bits from v.
- fl := v.flag & (flagRO | flagIndir | flagAddr)
+ fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind())
// Using an unexported field forces flagRO.
if field.pkgPath != nil {
fl |= flagRO
}
- fl |= flag(typ.Kind()) << flagKindShift
-
- var ptr unsafe.Pointer
- // var scalar uintptr
- switch {
- case fl&flagIndir != 0:
- // Indirect. Just bump pointer.
- ptr = unsafe.Pointer(uintptr(v.ptr) + field.offset)
- case typ.pointers():
- if field.offset != 0 {
- panic("field access of ptr value isn't at offset 0")
- }
- ptr = v.ptr
- case bigEndian:
- // Must be scalar. Discard leading bytes.
- // scalar = v.scalar << (field.offset * 8)
- panic("reflect: missing flagIndir")
- default:
- // Must be scalar. Discard leading bytes.
- // scalar = v.scalar >> (field.offset * 8)
- panic("reflect: missing flagIndir")
- }
-
- return Value{typ, ptr /* scalar, */, fl}
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still okay.
+ ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset)
+ return Value{typ, ptr, fl}
}
// FieldByIndex returns the nested field corresponding to index.
// It panics if v's Kind is not struct.
func (v Value) FieldByIndex(index []int) Value {
+ if len(index) == 1 {
+ return v.Field(index[0])
+ }
v.mustBe(Struct)
for i, x := range index {
if i > 0 {
@@ -822,7 +679,6 @@ func (v Value) FieldByName(name string) Value {
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
- v.mustBe(Struct)
if f, ok := v.typ.FieldByNameFunc(match); ok {
return v.FieldByIndex(f.Index)
}
@@ -835,19 +691,11 @@ func (v Value) Float() float64 {
k := v.kind()
switch k {
case Float32:
- if v.flag&flagIndir != 0 {
- return float64(*(*float32)(v.ptr))
- }
- // return float64(*(*float32)(unsafe.Pointer(&v.scalar)))
- panic("reflect: missing flagIndir")
+ return float64(*(*float32)(v.ptr))
case Float64:
- if v.flag&flagIndir != 0 {
- return *(*float64)(v.ptr)
- }
- // return *(*float64)(unsafe.Pointer(&v.scalar))
- panic("reflect: missing flagIndir")
+ return *(*float64)(v.ptr)
}
- panic(&ValueError{"reflect.Value.Float", k})
+ panic(&ValueError{"reflect.Value.Float", v.kind()})
}
var uint8Type = TypeOf(uint8(0)).(*rtype)
@@ -855,82 +703,54 @@ var uint8Type = TypeOf(uint8(0)).(*rtype)
// Index returns v's i'th element.
// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
- k := v.kind()
- switch k {
+ switch v.kind() {
case Array:
tt := (*arrayType)(unsafe.Pointer(v.typ))
- if i < 0 || i > int(tt.len) {
+ if uint(i) >= uint(tt.len) {
panic("reflect: array index out of range")
}
typ := tt.elem
- fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
- fl |= flag(typ.Kind()) << flagKindShift
offset := uintptr(i) * typ.size
- var val unsafe.Pointer
- switch {
- case fl&flagIndir != 0:
- // Indirect. Just bump pointer.
- val = unsafe.Pointer(uintptr(v.ptr) + offset)
- case typ.pointers():
- if offset != 0 {
- panic("can't Index(i) with i!=0 on ptrLike value")
- }
- val = v.ptr
- case bigEndian:
- // Direct. Discard leading bytes.
- // scalar = v.scalar << (offset * 8)
- panic("reflect: missing flagIndir")
- default:
- // Direct. Discard leading bytes.
- // scalar = v.scalar >> (offset * 8)
- panic("reflect: missing flagIndir")
- }
- return Value{typ, val /* scalar, */, fl}
+ // Either flagIndir is set and v.ptr points at array,
+ // or flagIndir is not set and v.ptr is the actual array data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must be doing Index(0), so offset = 0,
+ // so v.ptr + offset is still okay.
+ val := unsafe.Pointer(uintptr(v.ptr) + offset)
+ fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind()) // bits same as overall array
+ return Value{typ, val, fl}
case Slice:
// Element flag same as Elem of Ptr.
// Addressable, indirect, possibly read-only.
- fl := flagAddr | flagIndir | v.flag&flagRO
s := (*sliceHeader)(v.ptr)
- if i < 0 || i >= s.Len {
+ if uint(i) >= uint(s.Len) {
panic("reflect: slice index out of range")
}
tt := (*sliceType)(unsafe.Pointer(v.typ))
typ := tt.elem
- fl |= flag(typ.Kind()) << flagKindShift
val := unsafe.Pointer(uintptr(s.Data) + uintptr(i)*typ.size)
- return Value{typ, val /* 0, */, fl}
+ fl := flagAddr | flagIndir | v.flag&flagRO | flag(typ.Kind())
+ return Value{typ, val, fl}
case String:
- fl := v.flag&flagRO | flag(Uint8<<flagKindShift) | flagIndir
- s := (*StringHeader)(v.ptr)
- if i < 0 || i >= s.Len {
+ s := (*stringHeader)(v.ptr)
+ if uint(i) >= uint(s.Len) {
panic("reflect: string index out of range")
}
- b := uintptr(0)
- *(*byte)(unsafe.Pointer(&b)) = *(*byte)(unsafe.Pointer(uintptr(s.Data) + uintptr(i)))
- return Value{uint8Type, unsafe.Pointer(&b) /* 0, */, fl | flagIndir}
+ p := unsafe.Pointer(uintptr(s.Data) + uintptr(i))
+ fl := v.flag&flagRO | flag(Uint8) | flagIndir
+ return Value{uint8Type, p, fl}
}
- panic(&ValueError{"reflect.Value.Index", k})
+ panic(&ValueError{"reflect.Value.Index", v.kind()})
}
// Int returns v's underlying value, as an int64.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) Int() int64 {
k := v.kind()
- var p unsafe.Pointer
- if v.flag&flagIndir != 0 {
- p = v.ptr
- } else {
- // The escape analysis is good enough that &v.scalar
- // does not trigger a heap allocation.
- // p = unsafe.Pointer(&v.scalar)
- switch k {
- case Int, Int8, Int16, Int32, Int64:
- panic("reflect: missing flagIndir")
- }
- }
+ p := v.ptr
switch k {
case Int:
return int64(*(*int)(p))
@@ -943,7 +763,7 @@ func (v Value) Int() int64 {
case Int64:
return int64(*(*int64)(p))
}
- panic(&ValueError{"reflect.Value.Int", k})
+ panic(&ValueError{"reflect.Value.Int", v.kind()})
}
// CanInterface returns true if Interface can be used without panicking.
@@ -1040,7 +860,7 @@ func (v Value) IsNil() bool {
// Both are always bigger than a word; assume flagIndir.
return *(*unsafe.Pointer)(v.ptr) == nil
}
- panic(&ValueError{"reflect.Value.IsNil", k})
+ panic(&ValueError{"reflect.Value.IsNil", v.kind()})
}
// IsValid returns true if v represents a value.
@@ -1077,7 +897,7 @@ func (v Value) Len() int {
// String is bigger than a word; assume flagIndir.
return (*stringHeader)(v.ptr).Len
}
- panic(&ValueError{"reflect.Value.Len", k})
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
}
// MapIndex returns the value associated with key in the map v.
@@ -1100,11 +920,8 @@ func (v Value) MapIndex(key Value) Value {
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
k = key.ptr
- } else if key.typ.pointers() {
- k = unsafe.Pointer(&key.ptr)
} else {
- // k = unsafe.Pointer(&key.scalar)
- panic("reflect: missing flagIndir")
+ k = unsafe.Pointer(&key.ptr)
}
e := mapaccess(v.typ, v.pointer(), k)
if e == nil {
@@ -1112,17 +929,15 @@ func (v Value) MapIndex(key Value) Value {
}
typ := tt.elem
fl := (v.flag | key.flag) & flagRO
- fl |= flag(typ.Kind()) << flagKindShift
- if typ.Kind() != Ptr && typ.Kind() != UnsafePointer {
+ fl |= flag(typ.Kind())
+ if ifaceIndir(typ) {
// Copy result so future changes to the map
// won't change the underlying value.
c := unsafe_New(typ)
memmove(c, e, typ.size)
- return Value{typ, c /* 0, */, fl | flagIndir}
- } else if typ.pointers() {
- return Value{typ, *(*unsafe.Pointer)(e) /* 0, */, fl}
+ return Value{typ, c, fl | flagIndir}
} else {
- panic("reflect: can't happen")
+ return Value{typ, *(*unsafe.Pointer)(e), fl}
}
}
@@ -1135,10 +950,7 @@ func (v Value) MapKeys() []Value {
tt := (*mapType)(unsafe.Pointer(v.typ))
keyType := tt.key
- fl := v.flag&flagRO | flag(keyType.Kind())<<flagKindShift
- if keyType.Kind() != Ptr && keyType.Kind() != UnsafePointer {
- fl |= flagIndir
- }
+ fl := v.flag&flagRO | flag(keyType.Kind())
m := v.pointer()
mlen := int(0)
@@ -1156,16 +968,14 @@ func (v Value) MapKeys() []Value {
// we can do about it.
break
}
- if keyType.Kind() != Ptr && keyType.Kind() != UnsafePointer {
+ if ifaceIndir(keyType) {
// Copy result so future changes to the map
// won't change the underlying value.
c := unsafe_New(keyType)
memmove(c, key, keyType.size)
- a[i] = Value{keyType, c /* 0, */, fl | flagIndir}
- } else if keyType.pointers() {
- a[i] = Value{keyType, *(*unsafe.Pointer)(key) /* 0, */, fl}
+ a[i] = Value{keyType, c, fl | flagIndir}
} else {
- panic("reflect: can't happen")
+ a[i] = Value{keyType, *(*unsafe.Pointer)(key), fl}
}
mapiternext(it)
}
@@ -1180,16 +990,16 @@ func (v Value) Method(i int) Value {
if v.typ == nil {
panic(&ValueError{"reflect.Value.Method", Invalid})
}
- if v.flag&flagMethod != 0 || i < 0 || i >= v.typ.NumMethod() {
+ if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
panic("reflect: Method index out of range")
}
if v.typ.Kind() == Interface && v.IsNil() {
panic("reflect: Method on nil interface value")
}
fl := v.flag & (flagRO | flagIndir)
- fl |= flag(Func) << flagKindShift
+ fl |= flag(Func)
fl |= flag(i)<<flagMethodShift | flagMethod
- return Value{v.typ, v.ptr /* v.scalar, */, fl}
+ return Value{v.typ, v.ptr, fl}
}
// NumMethod returns the number of methods in the value's method set.
@@ -1240,7 +1050,7 @@ func (v Value) OverflowComplex(x complex128) bool {
case Complex128:
return false
}
- panic(&ValueError{"reflect.Value.OverflowComplex", k})
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
}
// OverflowFloat returns true if the float64 x cannot be represented by v's type.
@@ -1253,7 +1063,7 @@ func (v Value) OverflowFloat(x float64) bool {
case Float64:
return false
}
- panic(&ValueError{"reflect.Value.OverflowFloat", k})
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
}
func overflowFloat32(x float64) bool {
@@ -1273,7 +1083,7 @@ func (v Value) OverflowInt(x int64) bool {
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
- panic(&ValueError{"reflect.Value.OverflowInt", k})
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
}
// OverflowUint returns true if the uint64 x cannot be represented by v's type.
@@ -1286,7 +1096,7 @@ func (v Value) OverflowUint(x uint64) bool {
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
- panic(&ValueError{"reflect.Value.OverflowUint", k})
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
}
// Pointer returns v's value as a uintptr.
@@ -1331,7 +1141,7 @@ func (v Value) Pointer() uintptr {
case Slice:
return (*SliceHeader)(v.ptr).Data
}
- panic(&ValueError{"reflect.Value.Pointer", k})
+ panic(&ValueError{"reflect.Value.Pointer", v.kind()})
}
// Recv receives and returns a value from the channel v.
@@ -1353,9 +1163,9 @@ func (v Value) recv(nb bool) (val Value, ok bool) {
panic("reflect: recv on send-only channel")
}
t := tt.elem
- val = Value{t, nil /* 0, */, flag(t.Kind()) << flagKindShift}
+ val = Value{t, nil, flag(t.Kind())}
var p unsafe.Pointer
- if t.Kind() != Ptr && t.Kind() != UnsafePointer {
+ if ifaceIndir(t) {
p = unsafe_New(t)
val.ptr = p
val.flag |= flagIndir
@@ -1390,11 +1200,8 @@ func (v Value) send(x Value, nb bool) (selected bool) {
var p unsafe.Pointer
if x.flag&flagIndir != 0 {
p = x.ptr
- } else if x.typ.pointers() {
- p = unsafe.Pointer(&x.ptr)
} else {
- // p = unsafe.Pointer(&x.scalar)
- panic("reflect: missing flagIndir")
+ p = unsafe.Pointer(&x.ptr)
}
return chansend(v.typ, v.pointer(), p, nb)
}
@@ -1412,11 +1219,8 @@ func (v Value) Set(x Value) {
x = x.assignTo("reflect.Set", v.typ, target)
if x.flag&flagIndir != 0 {
memmove(v.ptr, x.ptr, v.typ.size)
- } else if x.typ.pointers() {
- *(*unsafe.Pointer)(v.ptr) = x.ptr
} else {
- // memmove(v.ptr, unsafe.Pointer(&x.scalar), v.typ.size)
- panic("reflect: missing flagIndir")
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
}
}
@@ -1456,7 +1260,7 @@ func (v Value) SetComplex(x complex128) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetComplex", k})
+ panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
case Complex64:
*(*complex64)(v.ptr) = complex64(x)
case Complex128:
@@ -1470,7 +1274,7 @@ func (v Value) SetFloat(x float64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetFloat", k})
+ panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
case Float32:
*(*float32)(v.ptr) = float32(x)
case Float64:
@@ -1484,7 +1288,7 @@ func (v Value) SetInt(x int64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetInt", k})
+ panic(&ValueError{"reflect.Value.SetInt", v.kind()})
case Int:
*(*int)(v.ptr) = int(x)
case Int8:
@@ -1505,7 +1309,7 @@ func (v Value) SetLen(n int) {
v.mustBeAssignable()
v.mustBe(Slice)
s := (*sliceHeader)(v.ptr)
- if n < 0 || n > int(s.Cap) {
+ if uint(n) > uint(s.Cap) {
panic("reflect: slice length out of range in SetLen")
}
s.Len = n
@@ -1539,11 +1343,8 @@ func (v Value) SetMapIndex(key, val Value) {
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
k = key.ptr
- } else if key.typ.pointers() {
- k = unsafe.Pointer(&key.ptr)
} else {
- // k = unsafe.Pointer(&key.scalar)
- panic("reflect: missing flagIndir")
+ k = unsafe.Pointer(&key.ptr)
}
if val.typ == nil {
mapdelete(v.typ, v.pointer(), k)
@@ -1554,11 +1355,8 @@ func (v Value) SetMapIndex(key, val Value) {
var e unsafe.Pointer
if val.flag&flagIndir != 0 {
e = val.ptr
- } else if val.typ.pointers() {
- e = unsafe.Pointer(&val.ptr)
} else {
- // e = unsafe.Pointer(&val.scalar)
- panic("reflect: missing flagIndir")
+ e = unsafe.Pointer(&val.ptr)
}
mapassign(v.typ, v.pointer(), k, e)
}
@@ -1569,7 +1367,7 @@ func (v Value) SetUint(x uint64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetUint", k})
+ panic(&ValueError{"reflect.Value.SetUint", v.kind()})
case Uint:
*(*uint)(v.ptr) = uint(x)
case Uint8:
@@ -1612,7 +1410,7 @@ func (v Value) Slice(i, j int) Value {
)
switch kind := v.kind(); kind {
default:
- panic(&ValueError{"reflect.Value.Slice", kind})
+ panic(&ValueError{"reflect.Value.Slice", v.kind()})
case Array:
if v.flag&flagAddr == 0 {
@@ -1635,7 +1433,7 @@ func (v Value) Slice(i, j int) Value {
panic("reflect.Value.Slice: string slice index out of bounds")
}
t := stringHeader{unsafe.Pointer(uintptr(s.Data) + uintptr(i)), j - i}
- return Value{v.typ, unsafe.Pointer(&t) /* 0, */, v.flag}
+ return Value{v.typ, unsafe.Pointer(&t), v.flag}
}
if i < 0 || j < i || j > cap {
@@ -1647,12 +1445,17 @@ func (v Value) Slice(i, j int) Value {
// Reinterpret as *sliceHeader to edit.
s := (*sliceHeader)(unsafe.Pointer(&x))
- s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size())
s.Len = j - i
s.Cap = cap - i
+ if cap-i > 0 {
+ s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size())
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
- fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
- return Value{typ.common(), unsafe.Pointer(&x) /* 0, */, fl}
+ fl := v.flag&flagRO | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
}
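
The guarded s.Data assignment above keeps an empty reslice from advancing the data pointer one element past the backing array — precisely the property the new checks in TestSlice and TestSlice3 pin down. The observable behaviour:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	xs := []int{1, 3, 5, 7, 9, 11, 13, 15}
    	rv := reflect.ValueOf(&xs).Elem()
    	rv = rv.Slice(3, 4)
    	ptr2 := rv.Pointer() // address of xs[3]
    	rv = rv.Slice(5, 5)  // empty reslice at the end of the remaining capacity
    	ptr3 := rv.Pointer()
    	fmt.Println(ptr3 == ptr2) // true: the pointer did not advance past the array
    }
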
// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
@@ -1666,7 +1469,7 @@ func (v Value) Slice3(i, j, k int) Value {
)
switch kind := v.kind(); kind {
default:
- panic(&ValueError{"reflect.Value.Slice3", kind})
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
case Array:
if v.flag&flagAddr == 0 {
@@ -1694,12 +1497,17 @@ func (v Value) Slice3(i, j, k int) Value {
// Reinterpret as *sliceHeader to edit.
s := (*sliceHeader)(unsafe.Pointer(&x))
- s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size())
s.Len = j - i
s.Cap = k - i
+ if k-i > 0 {
+ s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size())
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
- fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
- return Value{typ.common(), unsafe.Pointer(&x) /* 0, */, fl}
+ fl := v.flag&flagRO | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
}
// String returns the string v's underlying value, as a string.
@@ -1715,7 +1523,7 @@ func (v Value) String() string {
}
// If you call String on a reflect.Value of other type, it's better to
// print something than to panic. Useful in debugging.
- return "<" + v.typ.String() + " Value>"
+ return "<" + v.Type().String() + " Value>"
}
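
Going through v.Type() instead of v.typ matters for method values: v.typ is the receiver's type, while Type() resolves the method's func type, so a bound method now prints as a func — the behaviour TestValueString locks in. A sketch with an exported method, since gc's reflect only surfaces exported ones:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type Impl struct{}

    func (Impl) F() {}

    func main() {
    	rv := reflect.ValueOf(Impl{})
    	fmt.Println(rv.String())           // <main.Impl Value>
    	fmt.Println(rv.Method(0).String()) // <func() Value>, not <main.Impl Value>
    }
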
// TryRecv attempts to receive a value from the channel v but will not block.
@@ -1756,7 +1564,7 @@ func (v Value) Type() Type {
if v.typ.Kind() == Interface {
// Method on interface.
tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.methods) {
+ if uint(i) >= uint(len(tt.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &tt.methods[i]
@@ -1764,7 +1572,7 @@ func (v Value) Type() Type {
}
// Method on concrete type.
ut := v.typ.uncommon()
- if ut == nil || i < 0 || i >= len(ut.methods) {
+ if ut == nil || uint(i) >= uint(len(ut.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &ut.methods[i]
@@ -1775,18 +1583,7 @@ func (v Value) Type() Type {
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) Uint() uint64 {
k := v.kind()
- var p unsafe.Pointer
- if v.flag&flagIndir != 0 {
- p = v.ptr
- } else {
- // The escape analysis is good enough that &v.scalar
- // does not trigger a heap allocation.
- // p = unsafe.Pointer(&v.scalar)
- switch k {
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- panic("reflect: missing flagIndir")
- }
- }
+ p := v.ptr
switch k {
case Uint:
return uint64(*(*uint)(p))
@@ -1801,7 +1598,7 @@ func (v Value) Uint() uint64 {
case Uintptr:
return uint64(*(*uintptr)(p))
}
- panic(&ValueError{"reflect.Value.Uint", k})
+ panic(&ValueError{"reflect.Value.Uint", v.kind()})
}
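
With the word now always reachable through v.ptr, Uint reduces to a load switched on kind. The user-visible behavior is unchanged by the cleanup:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var a uint8 = 0xFF
	var b uintptr = 4096

	fmt.Println(reflect.ValueOf(a).Uint()) // 255
	fmt.Println(reflect.ValueOf(b).Uint()) // 4096

	// Calling Uint on a signed kind hits the ValueError panic above.
	defer func() { fmt.Println("recovered:", recover()) }()
	reflect.ValueOf(-1).Uint()
}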
// UnsafeAddr returns a pointer to v's data.
@@ -1940,17 +1737,6 @@ func Copy(dst, src Value) int {
n = sn
}
- // If sk is an in-line array, cannot take its address.
- // Instead, copy element by element.
- // TODO: memmove would be ok for this (sa = unsafe.Pointer(&v.scalar))
- // if we teach the compiler that ptrs don't escape from memmove.
- if src.flag&flagIndir == 0 {
- for i := 0; i < n; i++ {
- dst.Index(i).Set(src.Index(i))
- }
- return n
- }
-
// Copy via memmove.
var da, sa unsafe.Pointer
if dk == Array {
@@ -1958,7 +1744,9 @@ func Copy(dst, src Value) int {
} else {
da = (*sliceHeader)(dst.ptr).Data
}
- if sk == Array {
+ if src.flag&flagIndir == 0 {
+ sa = unsafe.Pointer(&src.ptr)
+ } else if sk == Array {
sa = src.ptr
} else {
sa = (*sliceHeader)(src.ptr).Data
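
The hunk above retires the element-by-element fallback: when the source word is stored directly (flagIndir clear), &src.ptr itself serves as the memmove source. At user level Copy behaves the same either way; a minimal sketch, assuming pointer-shaped arrays can be stored directly:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	p := new(int)
	*p = 7

	// A one-pointer array is pointer-shaped, so its word may be held
	// directly in the Value (the flagIndir == 0 source case above).
	src := [1]*int{p}
	dst := make([]*int, 1)

	n := reflect.Copy(reflect.ValueOf(dst), reflect.ValueOf(src))
	fmt.Println(n, *dst[0]) // 1 7
}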
@@ -1968,7 +1756,7 @@ func Copy(dst, src Value) int {
}
// A runtimeSelect is a single case passed to rselect.
-// This must match ../runtime/chan.c:/runtimeSelect
+// This must match ../runtime/select.go:/runtimeSelect
type runtimeSelect struct {
dir uintptr // 0, SendDir, or RecvDir
typ *rtype // channel type
@@ -1986,7 +1774,7 @@ func rselect([]runtimeSelect) (chosen int, recvOK bool)
// A SelectDir describes the communication direction of a select case.
type SelectDir int
-// NOTE: These values must match ../runtime/chan.c:/SelectDir.
+// NOTE: These values must match ../runtime/select.go:/selectDir.
const (
_ SelectDir = iota
@@ -2071,11 +1859,8 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
v = v.assignTo("reflect.Select", tt.elem, nil)
if v.flag&flagIndir != 0 {
rc.val = v.ptr
- } else if v.typ.pointers() {
- rc.val = unsafe.Pointer(&v.ptr)
} else {
- // rc.val = unsafe.Pointer(&v.scalar)
- panic("reflect: missing flagIndir")
+ rc.val = unsafe.Pointer(&v.ptr)
}
case SelectRecv:
@@ -2103,11 +1888,11 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
t := tt.elem
p := runcases[chosen].val
- fl := flag(t.Kind()) << flagKindShift
- if t.Kind() != Ptr && t.Kind() != UnsafePointer {
- recv = Value{t, p /* 0, */, fl | flagIndir}
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ recv = Value{t, p, fl | flagIndir}
} else {
- recv = Value{t, *(*unsafe.Pointer)(p) /* 0, */, fl}
+ recv = Value{t, *(*unsafe.Pointer)(p), fl}
}
}
return chosen, recv, recvOK
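
The receive case now consults ifaceIndir to decide whether the received word sits behind a pointer. From outside, reflect.Select delivers the value either way:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	ch := make(chan int, 1)
	ch <- 7

	cases := []reflect.SelectCase{{
		Dir:  reflect.SelectRecv,
		Chan: reflect.ValueOf(ch),
	}}
	chosen, recv, ok := reflect.Select(cases)
	fmt.Println(chosen, recv.Int(), ok) // 0 7 true
}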
@@ -2138,7 +1923,7 @@ func MakeSlice(typ Type, len, cap int) Value {
}
s := sliceHeader{unsafe_NewArray(typ.Elem().(*rtype), cap), len, cap}
- return Value{typ.common(), unsafe.Pointer(&s) /* 0, */, flagIndir | flag(Slice)<<flagKindShift}
+ return Value{typ.common(), unsafe.Pointer(&s), flagIndir | flag(Slice)}
}
// MakeChan creates a new channel with the specified type and buffer size.
@@ -2153,7 +1938,7 @@ func MakeChan(typ Type, buffer int) Value {
panic("reflect.MakeChan: unidirectional channel type")
}
ch := makechan(typ.(*rtype), uint64(buffer))
- return Value{typ.common(), unsafe.Pointer(&ch) /* 0, */, flagIndir | (flag(Chan) << flagKindShift)}
+ return Value{typ.common(), unsafe.Pointer(&ch), flag(Chan) | flagIndir}
}
// MakeMap creates a new map of the specified type.
@@ -2162,7 +1947,7 @@ func MakeMap(typ Type) Value {
panic("reflect.MakeMap of non-map type")
}
m := makemap(typ.(*rtype))
- return Value{typ.common(), unsafe.Pointer(&m) /* 0, */, flagIndir | (flag(Map) << flagKindShift)}
+ return Value{typ.common(), unsafe.Pointer(&m), flag(Map) | flagIndir}
}
// Indirect returns the value that v points to.
@@ -2202,11 +1987,11 @@ func Zero(typ Type) Value {
panic("reflect: Zero(nil)")
}
t := typ.common()
- fl := flag(t.Kind()) << flagKindShift
- if t.Kind() == Ptr || t.Kind() == UnsafePointer {
- return Value{t, nil /* 0, */, fl}
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
}
- return Value{t, unsafe_New(typ.(*rtype)) /* 0, */, fl | flagIndir}
+ return Value{t, nil, fl}
}
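
Zero now branches on ifaceIndir rather than hard-coding Ptr and UnsafePointer: indirect kinds get freshly zeroed storage, pointer-shaped kinds a nil word. The observable results are the usual zero values:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	type point struct{ X, Y int }

	// Indirect kind: backed by newly allocated zeroed memory.
	fmt.Println(reflect.Zero(reflect.TypeOf(point{})).Interface()) // {0 0}

	// Pointer-shaped kind: just a nil word, no allocation needed.
	fmt.Println(reflect.Zero(reflect.TypeOf(&point{})).IsNil()) // true
}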
// New returns a Value representing a pointer to a new zero value
@@ -2216,15 +2001,15 @@ func New(typ Type) Value {
panic("reflect: New(nil)")
}
ptr := unsafe_New(typ.(*rtype))
- fl := flag(Ptr) << flagKindShift
- return Value{typ.common().ptrTo(), ptr /* 0, */, fl}
+ fl := flag(Ptr)
+ return Value{typ.common().ptrTo(), ptr, fl}
}
// NewAt returns a Value representing a pointer to a value of the
// specified type, using p as that pointer.
func NewAt(typ Type, p unsafe.Pointer) Value {
- fl := flag(Ptr) << flagKindShift
- return Value{typ.common().ptrTo(), p /* 0, */, fl}
+ fl := flag(Ptr)
+ return Value{typ.common().ptrTo(), p, fl}
}
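
New allocates; NewAt instead wraps caller-provided memory in a typed pointer Value. A small unsafe sketch:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	var x int
	// NewAt yields a Value of type *int pointing at x's storage.
	pv := reflect.NewAt(reflect.TypeOf(x), unsafe.Pointer(&x))
	pv.Elem().SetInt(99)
	fmt.Println(x) // 99
}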
// assignTo returns a value v that can be assigned directly to typ.
@@ -2241,8 +2026,8 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value
// Same memory layout, so no harm done.
v.typ = dst
fl := v.flag & (flagRO | flagAddr | flagIndir)
- fl |= flag(dst.Kind()) << flagKindShift
- return Value{dst, v.ptr /* v.scalar, */, fl}
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
case implements(dst, v.typ):
if target == nil {
@@ -2254,7 +2039,7 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value
} else {
ifaceE2I(dst, x, target)
}
- return Value{dst, target /* 0, */, flagIndir | flag(Interface)<<flagKindShift}
+ return Value{dst, target, flagIndir | flag(Interface)}
}
// Failed.
@@ -2362,86 +2147,66 @@ func convertOp(dst, src *rtype) func(Value, Type) Value {
// where t is a signed or unsigned int type.
func makeInt(f flag, bits uint64, t Type) Value {
typ := t.common()
- if typ.size > ptrSize {
- // Assume ptrSize >= 4, so this must be uint64.
- ptr := unsafe_New(typ)
- *(*uint64)(unsafe.Pointer(ptr)) = bits
- return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
- }
- var s uintptr
+ ptr := unsafe_New(typ)
switch typ.size {
case 1:
- *(*uint8)(unsafe.Pointer(&s)) = uint8(bits)
+ *(*uint8)(unsafe.Pointer(ptr)) = uint8(bits)
case 2:
- *(*uint16)(unsafe.Pointer(&s)) = uint16(bits)
+ *(*uint16)(unsafe.Pointer(ptr)) = uint16(bits)
case 4:
- *(*uint32)(unsafe.Pointer(&s)) = uint32(bits)
+ *(*uint32)(unsafe.Pointer(ptr)) = uint32(bits)
case 8:
- *(*uint64)(unsafe.Pointer(&s)) = uint64(bits)
+ *(*uint64)(unsafe.Pointer(ptr)) = bits
}
- return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
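
makeInt (like its float and complex siblings below) now always heap-allocates the result word instead of special-casing sizes that fit a uintptr. These helpers back Value.Convert for numeric kinds:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(int64(300))

	// Truncates exactly like the language conversion uint8(300).
	u8 := v.Convert(reflect.TypeOf(uint8(0)))
	fmt.Println(u8.Uint()) // 44

	u64 := v.Convert(reflect.TypeOf(uint64(0)))
	fmt.Println(u64.Uint()) // 300
}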
// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
// where t is a float32 or float64 type.
func makeFloat(f flag, v float64, t Type) Value {
typ := t.common()
- if typ.size > ptrSize {
- // Assume ptrSize >= 4, so this must be float64.
- ptr := unsafe_New(typ)
- *(*float64)(unsafe.Pointer(ptr)) = v
- return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
- }
-
- var s uintptr
+ ptr := unsafe_New(typ)
switch typ.size {
case 4:
- *(*float32)(unsafe.Pointer(&s)) = float32(v)
+ *(*float32)(unsafe.Pointer(ptr)) = float32(v)
case 8:
- *(*float64)(unsafe.Pointer(&s)) = v
+ *(*float64)(unsafe.Pointer(ptr)) = v
}
- return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
// where t is a complex64 or complex128 type.
func makeComplex(f flag, v complex128, t Type) Value {
typ := t.common()
- if typ.size > ptrSize {
- ptr := unsafe_New(typ)
- switch typ.size {
- case 8:
- *(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
- case 16:
- *(*complex128)(unsafe.Pointer(ptr)) = v
- }
- return Value{typ, ptr /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 8:
+ *(*complex64)(unsafe.Pointer(ptr)) = complex64(v)
+ case 16:
+ *(*complex128)(unsafe.Pointer(ptr)) = v
}
-
- // Assume ptrSize <= 8 so this must be complex64.
- var s uintptr
- *(*complex64)(unsafe.Pointer(&s)) = complex64(v)
- return Value{typ, unsafe.Pointer(&s) /* 0, */, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
func makeString(f flag, v string, t Type) Value {
ret := New(t).Elem()
ret.SetString(v)
- ret.flag = ret.flag&^flagAddr | f | flagIndir
+ ret.flag = ret.flag&^flagAddr | f
return ret
}
func makeBytes(f flag, v []byte, t Type) Value {
ret := New(t).Elem()
ret.SetBytes(v)
- ret.flag = ret.flag&^flagAddr | f | flagIndir
+ ret.flag = ret.flag&^flagAddr | f
return ret
}
func makeRunes(f flag, v []rune, t Type) Value {
ret := New(t).Elem()
ret.setRunes(v)
- ret.flag = ret.flag&^flagAddr | f | flagIndir
+ ret.flag = ret.flag&^flagAddr | f
return ret
}
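
makeString, makeBytes, and makeRunes build their result through New(t).Elem(), which is already indirect, so re-adding flagIndir was redundant; the fix only clears flagAddr and merges the caller's flags. These helpers back string conversions:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := reflect.ValueOf("héllo")

	bs := s.Convert(reflect.TypeOf([]byte(nil)))
	rs := s.Convert(reflect.TypeOf([]rune(nil)))

	fmt.Println(bs.Len(), rs.Len()) // 6 5 (bytes vs runes)
}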
@@ -2532,7 +2297,7 @@ func cvtDirect(v Value, typ Type) Value {
ptr = c
f &^= flagAddr
}
- return Value{t, ptr /* v.scalar, */, v.flag&flagRO | f} // v.flag&flagRO|f == f?
+ return Value{t, ptr, v.flag&flagRO | f} // v.flag&flagRO|f == f?
}
// convertOp: concrete -> interface
@@ -2544,7 +2309,7 @@ func cvtT2I(v Value, typ Type) Value {
} else {
ifaceE2I(typ.(*rtype), x, target)
}
- return Value{typ.common(), target /* 0, */, v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
+ return Value{typ.common(), target, v.flag&flagRO | flagIndir | flag(Interface)}
}
// convertOp: interface -> interface
@@ -2557,7 +2322,7 @@ func cvtI2I(v Value, typ Type) Value {
return cvtT2I(v.Elem(), typ)
}
-// implemented in ../pkg/runtime
+// implemented in ../runtime
func chancap(ch unsafe.Pointer) int
func chanclose(ch unsafe.Pointer)
func chanlen(ch unsafe.Pointer) int
@@ -2577,10 +2342,14 @@ func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
func mapiternext(it unsafe.Pointer)
func maplen(m unsafe.Pointer) int
-
func call(typ *rtype, fnaddr unsafe.Pointer, isInterface bool, isMethod bool, params *unsafe.Pointer, results *unsafe.Pointer)
+
func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
+//go:noescape
+//extern memmove
+func memmove(adst, asrc unsafe.Pointer, n uintptr)
+
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.