Source file src/cmd/compile/internal/reflectdata/alg.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reflectdata

import (
	"fmt"

	"cmd/compile/internal/base"
	"cmd/compile/internal/compare"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
)

// AlgType returns the fixed-width AMEMxx variants instead of the general
// AMEM kind when possible.
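//
// For example, a plain-memory type of size 16 with suitable alignment is
// reported as AMEM128, so callers can use the fixed-size
// memhash128/memequal128 helpers rather than the variable-length AMEM path.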
func AlgType(t *types.Type) types.AlgKind {
	a := types.AlgType(t)
	if a == types.AMEM {
		if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
			// For example, we can't treat [2]int16 as an int32 if int32s require
			// 4-byte alignment. See issue 46283.
			return a
		}
		switch t.Size() {
		case 0:
			return types.AMEM0
		case 1:
			return types.AMEM8
		case 2:
			return types.AMEM16
		case 4:
			return types.AMEM32
		case 8:
			return types.AMEM64
		case 16:
			return types.AMEM128
		}
	}

	return a
}

// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
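//
// The returned closure captures no variables: it is a single pointer-sized
// word holding the address of the hash function, except in the
// variable-length AMEM case, where a second word records t.Size().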
func genhash(t *types.Type) *obj.LSym {
	switch AlgType(t) {
	default:
		// genhash is only called for types that have equality
		base.Fatalf("genhash %v", t)
	case types.AMEM0:
		return sysClosure("memhash0")
	case types.AMEM8:
		return sysClosure("memhash8")
	case types.AMEM16:
		return sysClosure("memhash16")
	case types.AMEM32:
		return sysClosure("memhash32")
	case types.AMEM64:
		return sysClosure("memhash64")
	case types.AMEM128:
		return sysClosure("memhash128")
	case types.ASTRING:
		return sysClosure("strhash")
	case types.AINTER:
		return sysClosure("interhash")
	case types.ANILINTER:
		return sysClosure("nilinterhash")
	case types.AFLOAT32:
		return sysClosure("f32hash")
	case types.AFLOAT64:
		return sysClosure("f64hash")
	case types.ACPLX64:
		return sysClosure("c64hash")
	case types.ACPLX128:
		return sysClosure("c128hash")
	case types.AMEM:
		// For other sizes of plain memory, we build a closure
		// that calls memhash_varlen. The size of the memory is
		// encoded in the first slot of the closure.
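		// The closure layout is:
		//
		//	offset 0:       pointer to memhash_varlen
		//	offset PtrSize: t.Size()
		//
		// so memhash_varlen can recover the size from its closure at
		// call time.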
		closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Size()))
		if len(closure.P) > 0 { // already generated
			return closure
		}
		if memhashvarlen == nil {
			memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
		}
		ot := 0
		ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
		ot = objw.Uintptr(closure, ot, uint64(t.Size())) // size encoded in closure
		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case types.ASPECIAL:
		break
	}

	closure := TypeLinksymPrefix(".hashfunc", t)
	if len(closure.P) > 0 { // already generated
		return closure
	}

	// Generate hash functions for subtypes.
	// There are cases where we might not use these hashes,
	// but in that case they will get dead-code eliminated.
	// (And the closure generated by genhash will also get
	// dead-code eliminated, as we call the subtype hashers
	// directly.)
	switch t.Kind() {
	case types.TARRAY:
		genhash(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.Fields() {
			genhash(f.Type)
		}
	}

	if base.Flag.LowerR != 0 {
		fmt.Printf("genhash %v %v\n", closure, t)
	}

	fn := hashFunc(t)

	// Build closure. It doesn't close over any variables, so
	// it contains just the function pointer.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)

	return closure
}

// hashFunc generates and returns the hash function for type t,
// with signature func(p *T, h uintptr) uintptr.
func hashFunc(t *types.Type) *ir.Func {
	sym := TypeSymPrefix(".hash", t)
	if sym.Def != nil {
		return sym.Def.(*ir.Name).Func
	}

	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos

	// func sym(p *T, h uintptr) uintptr
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
			types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
		},
		[]*types.Field{
			types.NewField(pos, nil, types.Types[types.TUINTPTR]),
		},
	))
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.

	typecheck.DeclFunc(fn)
	np := fn.Dcl[0]
	nh := fn.Dcl[1]

	switch t.Kind() {
	case types.TARRAY:
		// An array of pure memory would be handled by the
		// standard algorithm, so the element type must not be
		// pure memory.
		hashel := hashfor(t.Elem())

		// for i := 0; i < nelem; i++
		ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0))
		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem()))
		post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1)))
		loop := ir.NewForStmt(base.Pos, nil, cond, post, nil, false)
		loop.PtrInit().Append(init)

		// h = hashel(&p[i], h)
		call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)

		nx := ir.NewIndexExpr(base.Pos, np, ni)
		nx.SetBounded(true)
		na := typecheck.NodAddr(nx)
		call.Args.Append(na)
		call.Args.Append(nh)
		loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))

		fn.Body.Append(loop)

	case types.TSTRUCT:
		// Walk the struct using memhash for runs of AMEM
		// and calling specific hash functions for the others.
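		//
		// For example, for a struct like
		//
		//	type T struct {
		//		a, b int32
		//		s    string
		//	}
		//
		// the generated body is, schematically:
		//
		//	h = memhash(&p.a, h, 8) // a and b form one 8-byte memory run
		//	h = strhash(&p.s, h)
		//	return h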
		for i, fields := 0, t.Fields(); i < len(fields); {
			f := fields[i]

			// Skip blank fields.
			if f.Sym.IsBlank() {
				i++
				continue
			}

			// Hash non-memory fields with appropriate hash function.
			if !compare.IsRegularMemory(f.Type) {
				hashel := hashfor(f.Type)
				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
				na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
				call.Args.Append(na)
				call.Args.Append(nh)
				fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
				i++
				continue
			}

			// Otherwise, hash a maximal length run of raw memory.
			size, next := compare.Memrun(t, i)

			// h = hashel(&p.first, h, size)
			hashel := hashmem(f.Type)
			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
			na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
			call.Args.Append(na)
			call.Args.Append(nh)
			call.Args.Append(ir.NewInt(base.Pos, size))
			fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))

			i = next
		}
	}

	r := ir.NewReturnStmt(base.Pos, nil)
	r.Results.Append(nh)
	fn.Body.Append(r)

	if base.Flag.LowerR != 0 {
		ir.DumpList("genhash body", fn.Body)
	}

	typecheck.FinishFuncBody()

	fn.SetDupok(true)

	ir.WithFunc(fn, func() {
		typecheck.Stmts(fn.Body)
	})

	fn.SetNilCheckDisabled(true)

	return fn
}

func runtimeHashFor(name string, t *types.Type) *ir.Name {
	return typecheck.LookupRuntime(name, t)
}

// hashfor returns the function to compute the hash of a value of type t.
func hashfor(t *types.Type) *ir.Name {
	switch types.AlgType(t) {
	case types.AMEM:
		base.Fatalf("hashfor with AMEM type")
	case types.AINTER:
		return runtimeHashFor("interhash", t)
	case types.ANILINTER:
		return runtimeHashFor("nilinterhash", t)
	case types.ASTRING:
		return runtimeHashFor("strhash", t)
	case types.AFLOAT32:
		return runtimeHashFor("f32hash", t)
	case types.AFLOAT64:
		return runtimeHashFor("f64hash", t)
	case types.ACPLX64:
		return runtimeHashFor("c64hash", t)
	case types.ACPLX128:
		return runtimeHashFor("c128hash", t)
	}

	fn := hashFunc(t)
	return fn.Nname
}

// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
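//
// For example, sysClosure("strhash") returns the runtime.strhash·f symbol:
// a single read-only, pointer-sized word holding the address of
// runtime.strhash.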
func sysClosure(name string) *obj.LSym {
	s := typecheck.LookupRuntimeVar(name + "·f")
	if len(s.P) == 0 {
		f := typecheck.LookupRuntimeFunc(name)
		objw.SymPtr(s, 0, f, 0)
		objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	}
	return s
}

// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
	switch AlgType(t) {
	case types.ANOEQ, types.ANOALG:
		// The runtime will panic if it tries to compare
		// a type with a nil equality function.
		return nil
	case types.AMEM0:
		return sysClosure("memequal0")
	case types.AMEM8:
		return sysClosure("memequal8")
	case types.AMEM16:
		return sysClosure("memequal16")
	case types.AMEM32:
		return sysClosure("memequal32")
	case types.AMEM64:
		return sysClosure("memequal64")
	case types.AMEM128:
		return sysClosure("memequal128")
	case types.ASTRING:
		return sysClosure("strequal")
	case types.AINTER:
		return sysClosure("interequal")
	case types.ANILINTER:
		return sysClosure("nilinterequal")
	case types.AFLOAT32:
		return sysClosure("f32equal")
	case types.AFLOAT64:
		return sysClosure("f64equal")
	case types.ACPLX64:
		return sysClosure("c64equal")
	case types.ACPLX128:
		return sysClosure("c128equal")
	case types.AMEM:
		// make equality closure. The size of the type
		// is encoded in the closure.
		closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Size()))
		if len(closure.P) != 0 {
			return closure
		}
		if memequalvarlen == nil {
			memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen")
		}
		ot := 0
		ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
		ot = objw.Uintptr(closure, ot, uint64(t.Size()))
		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
		return closure
	case types.ASPECIAL:
		break
	}

	closure := TypeLinksymPrefix(".eqfunc", t)
	if len(closure.P) > 0 { // already generated
		return closure
	}

	if base.Flag.LowerR != 0 {
		fmt.Printf("geneq %v\n", t)
	}

	fn := eqFunc(t)

	// Generate a closure which points at the function we just generated.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	return closure
}

// eqFunc autogenerates and returns the function that computes equality for
// values of type t, which must be a struct or array that has equality but
// cannot be handled by the standard memequal algorithms.
func eqFunc(t *types.Type) *ir.Func {
	sym := TypeSymPrefix(".eq", t)
	if sym.Def != nil {
		return sym.Def.(*ir.Name).Func
	}

	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos

	// func sym(p, q *T) bool
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
			types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
		},
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
		},
	))
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.

	typecheck.DeclFunc(fn)
	np := fn.Dcl[0]
	nq := fn.Dcl[1]
	nr := fn.Dcl[2]

	// Label to jump to if an equality test fails.
	neq := typecheck.AutoLabel(".neq")

	// We reach here only for types that have equality but
	// cannot be handled by the standard algorithms,
	// so t must be either an array or a struct.
	switch t.Kind() {
	default:
		base.Fatalf("geneq %v", t)

	case types.TARRAY:
		nelem := t.NumElem()

		// checkAll generates code to check the equality of all array elements.
		// If unroll is greater than nelem, checkAll generates:
		//
		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
		// } else {
		//   goto neq
		// }
		//
		// And so on.
		//
		// Otherwise it generates:
		//
		// iterateTo := nelem/unroll*unroll
		// for i := 0; i < iterateTo; i += unroll {
		//   if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
		//   } else {
		//     goto neq
		//   }
		// }
		// if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
		// } else {
		//    goto neq
		// }
		//
		checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
			// checkIdx generates a node to check for equality at index i.
			checkIdx := func(i ir.Node) ir.Node {
				// pi := p[i]
				pi := ir.NewIndexExpr(base.Pos, np, i)
				pi.SetBounded(true)
				pi.SetType(t.Elem())
				// qi := q[i]
				qi := ir.NewIndexExpr(base.Pos, nq, i)
				qi.SetBounded(true)
				qi.SetType(t.Elem())
				return eq(pi, qi)
			}

			iterations := nelem / unroll
			iterateTo := iterations * unroll
			// If a loop is iterated only once, there shouldn't be any loop at all.
			if iterations == 1 {
				iterateTo = 0
			}

			if iterateTo > 0 {
				// Generate an unrolled for loop.
				// for i := 0; i < nelem/unroll*unroll; i += unroll
				i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
				init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
				cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
				loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
				loop.PtrInit().Append(init)

				// if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
				// } else {
				//   goto neq
				// }
				for j := int64(0); j < unroll; j++ {
					// if check {} else { goto neq }
					nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
					nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
					loop.Body.Append(nif)
					post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1)))
					loop.Body.Append(post)
				}

				fn.Body.Append(loop)

				if nelem == iterateTo {
					if last {
						fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
					}
					return
				}
			}

			// Generate remaining checks, if nelem is not a multiple of unroll.
			if last {
				// Do last comparison in a different manner.
				nelem--
			}
			// if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
			// } else {
			//    goto neq
			// }
			for j := iterateTo; j < nelem; j++ {
				// if check {} else { goto neq }
				nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(base.Pos, j)), nil, nil)
				nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
				fn.Body.Append(nif)
			}
			if last {
				fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(base.Pos, nelem))))
			}
		}

		switch t.Elem().Kind() {
		case types.TSTRING:
			// Do two loops. First, check that all the lengths match (cheap).
			// Second, check that all the contents match (expensive).
			checkAll(3, false, func(pi, qi ir.Node) ir.Node {
				// Compare lengths.
				eqlen, _ := compare.EqString(pi, qi)
				return eqlen
			})
			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
				// Compare contents.
				_, eqmem := compare.EqString(pi, qi)
				return eqmem
			})
		case types.TFLOAT32, types.TFLOAT64:
			checkAll(2, true, func(pi, qi ir.Node) ir.Node {
				// p[i] == q[i]
				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
			})
		case types.TSTRUCT:
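			// Each element is a struct. Split its comparison into the plain
			// conditions (simple == expressions) and the call conditions
			// (e.g. string or interface equality). If the element mixes both
			// and its comparison cannot panic, emit the plain conditions for
			// every element first, then the call conditions, so the cheap
			// checks run before any calls are made.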
			isCall := func(n ir.Node) bool {
				return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
			}
			var expr ir.Node
			var hasCallExprs bool
			allCallExprs := true
			and := func(cond ir.Node) {
				if expr == nil {
					expr = cond
				} else {
					expr = ir.NewLogicalExpr(base.Pos, ir.OANDAND, expr, cond)
				}
			}

			var tmpPos src.XPos
			pi := ir.NewIndexExpr(tmpPos, np, ir.NewInt(tmpPos, 0))
			pi.SetBounded(true)
			pi.SetType(t.Elem())
			qi := ir.NewIndexExpr(tmpPos, nq, ir.NewInt(tmpPos, 0))
			qi.SetBounded(true)
			qi.SetType(t.Elem())
			flatConds, canPanic := compare.EqStruct(t.Elem(), pi, qi)
			for _, c := range flatConds {
				if isCall(c) {
					hasCallExprs = true
				} else {
					allCallExprs = false
				}
			}
			if !hasCallExprs || allCallExprs || canPanic {
				checkAll(1, true, func(pi, qi ir.Node) ir.Node {
					// p[i] == q[i]
					return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
				})
			} else {
				checkAll(4, false, func(pi, qi ir.Node) ir.Node {
					expr = nil
					flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
					if len(flatConds) == 0 {
						return ir.NewBool(base.Pos, true)
					}
					for _, c := range flatConds {
						if !isCall(c) {
							and(c)
						}
					}
					return expr
				})
				checkAll(2, true, func(pi, qi ir.Node) ir.Node {
					expr = nil
					flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
					for _, c := range flatConds {
						if isCall(c) {
							and(c)
						}
					}
					return expr
				})
			}
		default:
			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
				// p[i] == q[i]
				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
			})
		}

	case types.TSTRUCT:
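		// compare.EqStruct flattens the struct comparison into a list of
		// conditions (memory-run comparisons and per-field equality checks).
		// All but the last become
		//
		//	if cond {
		//	} else {
		//		goto neq
		//	}
		//
		// and the last condition is assigned directly to the result r.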
		flatConds, _ := compare.EqStruct(t, np, nq)
		if len(flatConds) == 0 {
			fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
		} else {
			for _, c := range flatConds[:len(flatConds)-1] {
				// if cond {} else { goto neq }
				n := ir.NewIfStmt(base.Pos, c, nil, nil)
				n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
				fn.Body.Append(n)
			}
			fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
		}
	}

	// ret:
	//   return
	ret := typecheck.AutoLabel(".ret")
	fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
	fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))

	// neq:
	//   r = false
	//   return (or goto ret)
	fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
	fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, false)))
	if compare.EqCanPanic(t) || anyCall(fn) {
		// Epilogue is large, so share it with the equal case.
		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
	} else {
		// Epilogue is small, so don't bother sharing.
		fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
	}
	// TODO(khr): the epilogue size detection condition above isn't perfect.
	// We should really do a generic CL that shares epilogues across
	// the board. See #24936.

	if base.Flag.LowerR != 0 {
		ir.DumpList("geneq body", fn.Body)
	}

	typecheck.FinishFuncBody()

	fn.SetDupok(true)

	ir.WithFunc(fn, func() {
		typecheck.Stmts(fn.Body)
	})

	// Disable checknils while compiling this code.
	// We are comparing a struct or an array,
	// neither of which can be nil, and our comparisons
	// are shallow.
	fn.SetNilCheckDisabled(true)
	return fn
}

// EqFor returns the ONAME node representing type t's equal function, and a
// boolean indicating whether a length argument must be passed when calling
// the function.
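//
// For plain-memory types it returns runtime.memequal, which also takes a
// size argument (so the boolean is true); for types needing a generated
// comparison it returns the autogenerated .eq function (boolean false).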
func EqFor(t *types.Type) (ir.Node, bool) {
	switch types.AlgType(t) {
	case types.AMEM:
		return typecheck.LookupRuntime("memequal", t, t), true
	case types.ASPECIAL:
		fn := eqFunc(t)
		return fn.Nname, false
	}
	base.Fatalf("EqFor %v", t)
	return nil, false
}

// anyCall reports whether fn's body contains any function calls.
func anyCall(fn *ir.Func) bool {
	return ir.Any(fn, func(n ir.Node) bool {
		// TODO(rsc): No methods?
		op := n.Op()
		return op == ir.OCALL || op == ir.OCALLFUNC
	})
}

// hashmem returns the memhash runtime function, used to hash runs of
// plain memory.
func hashmem(t *types.Type) ir.Node {
	return typecheck.LookupRuntime("memhash", t)
}
