Source file src/cmd/compile/internal/ssa/deadstore.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
)

// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load.
// This implementation only works within a basic block. TODO: use something more global.
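// For example (an illustrative sketch, not code from this package), in a
// block compiled from
//
//	*p = 1 // dead: overwritten below, with no intervening load of *p
//	*p = 2
//
// the first store is unconditionally shadowed by the second, so it is
// rewritten into an OpCopy of its memory argument and later removed by
// dead code elimination.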
func dse(f *Func) {
	var stores []*Value
	loadUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(loadUse)
	storeUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(storeUse)
	shadowed := f.newSparseMap(f.NumValues())
	defer f.retSparseMap(shadowed)
	// localAddrs maps from a local variable (the Aux field of a LocalAddr value) to an instance of a LocalAddr value for that variable in the current block.
	localAddrs := map[any]*Value{}
	for _, b := range f.Blocks {
		// Find all the stores in this block. Categorize their uses:
		//  loadUse contains stores which are used by a subsequent load.
		//  storeUse contains stores which are used by a subsequent store.
		loadUse.clear()
		storeUse.clear()
		// TODO(deparker): use the 'clear' builtin once compiler bootstrap minimum version is raised to 1.21.
		for k := range localAddrs {
			delete(localAddrs, k)
		}
		stores = stores[:0]
		for _, v := range b.Values {
			if v.Op == OpPhi {
				// Ignore phis - they will always be first and can't be eliminated
				continue
			}
			if v.Type.IsMemory() {
				stores = append(stores, v)
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						storeUse.add(a.ID)
						if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef {
							// CALL, DUFFCOPY, etc. are both
							// reads and writes.
							loadUse.add(a.ID)
						}
					}
				}
			} else {
				if v.Op == OpLocalAddr {
					if _, ok := localAddrs[v.Aux]; !ok {
						localAddrs[v.Aux] = v
					} else {
						continue
					}
				}
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						loadUse.add(a.ID)
					}
				}
			}
		}
		if len(stores) == 0 {
			continue
		}

		// find last store in the block
		var last *Value
		for _, v := range stores {
			if storeUse.contains(v.ID) {
				continue
			}
			if last != nil {
				b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
			}
			last = v
		}
		if last == nil {
			b.Fatalf("no last store found - cycle?")
		}

		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
		// A "shadowed address" is a pointer, offset, and size describing a memory region that
		// is known to be written. We keep track of shadowed addresses in the shadowed map,
		// mapping the ID of the address to a shadowRange where future writes will happen.
		// Since we're walking backwards, writes to a shadowed region are useless,
		// as they will be immediately overwritten.
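		// Illustrative sketch (hypothetical values): for a block containing
		//
		//	*p = 1   // no load of *p in between
		//	*p = 2
		//
		// we reach "*p = 2" first and record p's byte range as shadowed;
		// when we then reach "*p = 1", its range is contained in the
		// shadowed range, so it is turned into an OpCopy below.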
		shadowed.clear()
		v := last

	walkloop:
		if loadUse.contains(v.ID) {
			// Someone might be reading this memory state.
			// Clear all shadowed addresses.
			shadowed.clear()
		}
		if v.Op == OpStore || v.Op == OpZero {
			ptr := v.Args[0]
			var off int64
			for ptr.Op == OpOffPtr { // Walk to base pointer
				off += ptr.AuxInt
				ptr = ptr.Args[0]
			}
			var sz int64
			if v.Op == OpStore {
				sz = v.Aux.(*types.Type).Size()
			} else { // OpZero
				sz = v.AuxInt
			}
			if ptr.Op == OpLocalAddr {
				if la, ok := localAddrs[ptr.Aux]; ok {
					ptr = la
				}
			}
			sr := shadowRange(shadowed.get(ptr.ID))
			if sr.contains(off, off+sz) {
				// Modify the store/zero into a copy of the memory state,
				// effectively eliding the store operation.
				if v.Op == OpStore {
					// store addr value mem
					v.SetArgs1(v.Args[2])
				} else {
					// zero addr mem
					v.SetArgs1(v.Args[1])
				}
				v.Aux = nil
				v.AuxInt = 0
				v.Op = OpCopy
			} else {
				// Extend shadowed region.
				shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
			}
		}
		// walk to previous store
		if v.Op == OpPhi {
			// At start of block.  Move on to next block.
			// The memory phi, if it exists, is always
			// the first logical store in the block.
			// (Even if it isn't the first in the current b.Values order.)
			continue
		}
		for _, a := range v.Args {
			if a.Block == b && a.Type.IsMemory() {
				v = a
				goto walkloop
			}
		}
	}
}

// A shadowRange encodes a set of byte offsets [lo():hi()] from
// a given pointer that will be written to later in the block.
// A zero shadowRange encodes an empty shadowed range (and so
// does a -1 shadowRange, which is what sparsemap.get returns
// on a failed lookup).
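// For example (values chosen purely for illustration), a shadowed region
// covering bytes [8,16) of the base pointer is packed as
//
//	shadowRange(8 + 16<<16) // lo() == 8, hi() == 16
//
// with lo in the low 16 bits and hi in the next 16 bits.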
type shadowRange int32

func (sr shadowRange) lo() int64 {
	return int64(sr & 0xffff)
}

func (sr shadowRange) hi() int64 {
	return int64((sr >> 16) & 0xffff)
}

// contains reports whether [lo:hi] is completely within sr.
func (sr shadowRange) contains(lo, hi int64) bool {
	return lo >= sr.lo() && hi <= sr.hi()
}

// merge returns the union of sr and [lo:hi].
// merge is allowed to return something smaller than the union.
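// For example (illustrative values), merging the disjoint ranges [0,8)
// and [32,48) keeps only the larger one, [32,48), while merging the
// overlapping ranges [0,8) and [4,16) yields the exact union [0,16).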
func (sr shadowRange) merge(lo, hi int64) shadowRange {
	if lo < 0 || hi > 0xffff {
		// Ignore offsets that are too large or small.
		return sr
	}
	if sr.lo() == sr.hi() {
		// Old range is empty - use new one.
		return shadowRange(lo + hi<<16)
	}
	if hi < sr.lo() || lo > sr.hi() {
		// The two regions don't overlap or abut, so we would
		// have to keep track of multiple disjoint ranges.
		// Because we can only keep one, keep the larger one.
		if sr.hi()-sr.lo() >= hi-lo {
			return sr
		}
		return shadowRange(lo + hi<<16)
	}
	// Regions overlap or abut - compute the union.
	return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
}

// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this,
// we track the operations that the address of each auto reaches, and if it only
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
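// For example (an illustrative sketch), in
//
//	var a [2]int64 // stack auto whose address does not escape
//	a[1] = 42      // a's address reaches only this store
//
// the address of a reaches only store operations, so the store (and a's
// VarDef) is rewritten into an OpCopy of its memory argument; dead code
// elimination then removes the leftover address computations.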
func elimDeadAutosGeneric(f *Func) {
	addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
	elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
	var used ir.NameSet               // used autos that must be kept

	// visit the value and report whether any of the maps are updated
	visit := func(v *Value) (changed bool) {
		args := v.Args
		switch v.Op {
		case OpAddr, OpLocalAddr:
			// Propagate the address if it points to an auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if addr[v] == nil {
				addr[v] = n
				changed = true
			}
			return
		case OpVarDef:
			// v should be eliminated if we eliminate the auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if elim[v] == nil {
				elim[v] = n
				changed = true
			}
			return
		case OpVarLive:
			// Don't delete the auto if it needs to be kept alive.

			// We depend on this check to keep the autotmp stack slots
			// for open-coded defers from being removed (since they
			// may not be used by the inline code, but will be used by
			// panic processing).
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if !used.Has(n) {
				used.Add(n)
				changed = true
			}
			return
		case OpStore, OpMove, OpZero:
			// v should be eliminated if we eliminate the auto.
			n, ok := addr[args[0]]
			if ok && elim[v] == nil {
				elim[v] = n
				changed = true
			}
			// Other args might hold pointers to autos.
			args = args[1:]
		}

		// The code below assumes that we have handled all the ops
		// with sym effects already. Sanity check that here.
		// Ignore Args since they can't be autos.
		if v.Op.SymEffect() != SymNone && v.Op != OpArg {
			panic("unhandled op with sym effect")
		}

		if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
			// We need to keep nil checks even if they have no use.
			// Also keep calls and values that have side effects.
			return
		}

		// If the address of the auto reaches a memory or control
		// operation not covered above then we probably need to keep it.
		// We also need to keep autos if they reach Phis (issue #26153).
		if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
			for _, a := range args {
				if n, ok := addr[a]; ok {
					if !used.Has(n) {
						used.Add(n)
						changed = true
					}
				}
			}
			return
		}

		// Propagate any auto addresses through v.
		var node *ir.Name
		for _, a := range args {
			if n, ok := addr[a]; ok && !used.Has(n) {
				if node == nil {
					node = n
				} else if node != n {
					// Most of the time we only see one pointer
					// reaching an op, but some ops can take
					// multiple pointers (e.g. NeqPtr, Phi etc.).
					// This is rare, so just propagate the first
					// value to keep things simple.
					used.Add(n)
					changed = true
				}
			}
		}
		if node == nil {
			return
		}
		if addr[v] == nil {
			// The address of an auto reaches this op.
			addr[v] = node
			changed = true
			return
		}
		if addr[v] != node {
			// This doesn't happen in practice, but catch it just in case.
			used.Add(node)
			changed = true
		}
		return
	}

	iterations := 0
	for {
		if iterations == 4 {
			// give up
			return
		}
		iterations++
		changed := false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				changed = visit(v) || changed
			}
			// keep the auto if its address reaches a control value
			for _, c := range b.ControlValues() {
				if n, ok := addr[c]; ok && !used.Has(n) {
					used.Add(n)
					changed = true
				}
			}
		}
		if !changed {
			break
		}
	}

	// Eliminate stores to unread autos.
	for v, n := range elim {
		if used.Has(n) {
			continue
		}
		// replace with OpCopy
		v.SetArgs1(v.MemoryArg())
		v.Aux = nil
		v.AuxInt = 0
		v.Op = OpCopy
	}
}

// elimUnreadAutos deletes stores (and the associated bookkeeping VarDef ops)
// to autos that are never read from.
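// For example (an illustrative sketch), a stack auto that is written but
// never read afterwards, as in
//
//	var buf [128]byte
//	buf[0] = 1 // buf is never read after this
//
// leaves no op that reads buf, so the stores to buf's slot (and the
// associated VarDef) are rewritten below into an OpCopy of their memory
// argument.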
func elimUnreadAutos(f *Func) {
	// Loop over all ops that affect autos, taking note of which autos we
	// need and also of stores that we might be able to eliminate.
	var seen ir.NameSet
	var stores []*Value
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			n, ok := v.Aux.(*ir.Name)
			if !ok {
				continue
			}
			if n.Class != ir.PAUTO {
				continue
			}

			effect := v.Op.SymEffect()
			switch effect {
			case SymNone, SymWrite:
				// If we haven't seen the auto yet
				// then this might be a store we can
				// eliminate.
				if !seen.Has(n) {
					stores = append(stores, v)
				}
			default:
				// Assume the auto is needed (loaded,
				// has its address taken, etc.).
				// Note we have to check the uses
				// because dead loads haven't been
				// eliminated yet.
				if v.Uses > 0 {
					seen.Add(n)
				}
			}
		}
	}

	// Eliminate stores to unread autos.
	for _, store := range stores {
		n, _ := store.Aux.(*ir.Name)
		if seen.Has(n) {
			continue
		}

		// replace store with OpCopy
		store.SetArgs1(store.MemoryArg())
		store.Aux = nil
		store.AuxInt = 0
		store.Op = OpCopy
	}
}