Source file src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

//go:linkname maps_typeString internal/runtime/maps.typeString
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type

// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}

func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}
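
// The bracket counting in name keeps a '.' inside the type arguments of
// a generic instantiation from being mistaken for the final package
// separator. Illustrative inputs and outputs (a sketch; these strings
// are assumptions, not values produced by this file):
//
//	t.string()                 t.name()
//	"main.T"                   "T"
//	"main.Pair[a/b.X,a/b.Y]"   "Pair[a/b.X,a/b.Y]"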

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind() {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}
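
// The mask has one bit per pointer-sized word of t, with bit i set when
// word i may hold a pointer; it covers the first t.PtrBytes bytes of the
// type. For example (a sketch, assuming a 64-bit machine):
//
//	type T struct {
//		p *int    // word 0: pointer
//		x uintptr // word 1: scalar
//		q *int    // word 2: pointer
//	}
//
// T has PtrBytes = 24, so the mask covers 3 words and its first byte is
// 0b101 (bits 0 and 2 set).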

// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
var inProgress byte

// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}
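
// The nil / &inProgress / built slot above is a lock-free, once-only
// publication pattern. A minimal standalone sketch of the same idea
// using sync/atomic (an illustration with hypothetical names; the
// runtime itself must use internal/runtime/atomic and osyield instead
// of the scheduler):
//
//	var building byte // address used as the in-progress sentinel
//
//	func loadOrBuild(slot *unsafe.Pointer, build func() unsafe.Pointer) unsafe.Pointer {
//		for {
//			switch p := atomic.LoadPointer(slot); p {
//			case nil: // not built yet: try to claim the slot
//				if atomic.CompareAndSwapPointer(slot, nil, unsafe.Pointer(&building)) {
//					res := build()
//					atomic.StorePointer(slot, res)
//					return res
//				}
//			case unsafe.Pointer(&building): // someone else is building
//				runtime.Gosched() // spin politely until they finish
//			default: // already built
//				return p
//			}
//		}
//	}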

// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}

// write copies cnt bits into b, starting at bit 0 of data.
// Requires cnt > 0.
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}
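
// A worked example: writing cnt=5 bits of data=0b10111 at a cursor with
// n=6 loads the 6 bits already stored in the first byte, stacks the 5
// new bits above them (11 valid bits buffered), flushes one full byte,
// and then merges the remaining 3 bits into the low end of the next
// byte, leaving that byte's high bits untouched.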

func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must contain at least one pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}
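
// For example (a sketch): given
//
//	type Big struct {
//		small *int
//		big   [1 << 20]*int
//	}
//
// the small field is handled by a genuine recursive call, while big,
// being larger than half of Big, is deferred to the tail call at "top".
// Every genuine recursion at least halves the type size, so the stack
// depth stays bounded (~64 frames on a 64-bit machine) no matter how
// large the type is.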

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// The heap may use a wide range of addresses, not all of which are
// representable as a 32-bit offset. Moreover, the GC may one day start
// moving heap memory, in which case there would be no stable offset to
// define.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}
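
// A minimal sketch of how such a registry hands out stable negative IDs
// (illustrative only; the real entries are created on behalf of
// reflect.addReflectOff):
//
//	func addReflectOff(ptr unsafe.Pointer) int32 {
//		reflectOffsLock()
//		if reflectOffs.m == nil {
//			reflectOffs.m = make(map[int32]unsafe.Pointer)
//			reflectOffs.minv = make(map[unsafe.Pointer]int32)
//			reflectOffs.next = -1
//		}
//		id, found := reflectOffs.minv[ptr]
//		if !found {
//			id = reflectOffs.next
//			reflectOffs.next-- // negative IDs never collide with module offsets
//			reflectOffs.m[id] = ptr
//			reflectOffs.minv[ptr] = id
//		}
//		reflectOffsUnlock()
//		return id
//	}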

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run-time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}

func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}
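
// The bit tests in pkgPath decode the abi.Name layout: byte 0 is a flag
// byte (bit 0 = exported, bit 1 = has tag, bit 2 = has pkgPath,
// bit 3 = embedded), followed by a varint-length-prefixed name, then a
// varint-length-prefixed tag if bit 1 is set, and finally a 4-byte
// nameOff to the package path if bit 2 is set.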

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}
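
// The net effect: with buildmode=shared, resolveTypeOff consults
// md.typemap before computing md.types+off, so a type that appears in
// several modules always resolves to the copy in the earliest module
// that defined it, restoring the one-*_type-per-Go-type invariant that
// the rest of the runtime and reflect rely on.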

type _typePair struct {
	t1 *_type
	t2 *_type
}

func toRType(t *abi.Type) rtype {
	return rtype{t}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen, and thus equivalent, which prevents an
	// infinite loop if the two types are identical but recursively
	// defined and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind()
	if kind != v.Kind() {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		mt := (*abi.MapType)(unsafe.Pointer(t))
		mv := (*abi.MapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
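
// Without the seen map, typesEqual would never terminate on a recursive
// type loaded from two modules. For example (a sketch):
//
//	type List struct {
//		Next *List
//	}
//
// Comparing the two copies recurses into Next, which points back at
// List itself; recording the pair in seen before recursing cuts the
// cycle by treating the pair as equal on its second visit.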