Source file src/runtime/slice.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by internal/runtime/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
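//
// For illustration only: a caller-side pattern that the compiler is commonly
// described as lowering to a single makeslicecopy call (a sketch, not a
// guarantee of when that optimization applies) is
//
//	src := []byte("hello")
//	dst := make([]byte, len(src))
//	copy(dst, src)
//
// which allocates dst and fills it from src in one runtime call.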
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than tolen,
		// thereby making tolen a good slice length too, as the from and to slices
		// have the same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if !et.Pointers() {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in from since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
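		// For example (a sketch): a call such as make([]byte, n) where n is a
		// variable holding 1<<62 fails the check above on 64-bit platforms and
		// reports 'len out of range' here, even though the implicit cap of
		// 1<<62 is equally out of range.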
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity
//	   num = number of elements being added
//	    et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
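//
// For illustration only (a sketch in terms of this function's parameters,
// not the compiler's exact lowering): for s = append(s, e) when
// len(s)+1 > cap(s), the generated code does roughly
//
//	new := growslice(oldPtr, oldLen+1, oldCap, 1, et)
//	// ...then the caller stores e into element oldLen of new's backing store,
//	// which is why entries [oldLen, newLen) are left uninitialized here.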
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/chenzhuoyu/iasm
//   - github.com/cloudwego/dynamicgo
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with a nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
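			//
			// The length lenmem-et.Size_+et.PtrBytes covers every old element
			// in full except the last, which only needs its first PtrBytes
			// bytes considered, since pointers can only occur in that prefix
			// of the element.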
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}

// growsliceNoAlias is like growslice but only for the case where
// we know that oldPtr is not aliased.
//
// In other words, the caller must know that there are no other references
// to the backing memory of the slice being grown aside from the slice header
// that will be updated with new backing memory when growsliceNoAlias
// returns, and therefore oldPtr must be the only pointer to its referent
// aside from the slice header updated by the returned slice.
//
// In addition, oldPtr must point to the start of the allocation and match
// the pointer that was returned by mallocgc. In particular, oldPtr must not
// be an interior pointer, such as after a reslice.
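//
// For illustration (ordinary slice syntax, names made up): growing s below
// could satisfy this requirement, while growing t could not, because t's data
// pointer is interior to s's allocation:
//
//	s := make([]int, 4) // s's data pointer is the start of the allocation
//	t := s[1:]          // t's data pointer is interior to that allocation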
//
// See freegc for details.
func growsliceNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	s := growslice(oldPtr, newLen, oldCap, num, et)
	if goexperiment.RuntimeFreegc && oldPtr != nil && oldPtr != s.array {
		if gp := getg(); uintptr(oldPtr) < gp.stack.lo || gp.stack.hi <= uintptr(oldPtr) {
			// oldPtr does not point into the current stack, and it is not
			// the data pointer for s after the grow, so attempt to free it.
			// (Note that freegc also verifies that oldPtr does not point into our stack,
			// but checking here first is slightly cheaper for the case when
			// oldPtr is on the stack and freegc would be a no-op.)
			//
			// TODO(thepudds): it may be that oldPtr==s.array only when elemsize==0,
			// so perhaps we could prohibit growsliceNoAlias being called in that case
			// and eliminate that check here, or alternatively, we could lean into
			// freegc being a no-op for zero-sized allocations (that is, no check of
			// oldPtr != s.array here and just let freegc return quickly).
			noscan := !et.Pointers()
			freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
		}
	}
	return s
}

// nextslicecap computes the next appropriate slice capacity.
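//
// For a feel of the growth curve (worked arithmetic, not additional policy):
// when the requested length fits, capacities below the 256-element threshold
// simply double, e.g. 4 -> 8 -> 16, while larger ones grow by
// (newcap + 3*256)/4 per step, e.g. 256 -> 512 -> 832 -> 1232, approaching
// 1.25x as capacities get large.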
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence when newcap
		// overflows, `uint(newcap) > uint(newLen)`. This allows checking for
		// both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}

// reflect_growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/dynamicgo
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
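	//
	// For example (numbers chosen for illustration): with old.len == 3,
	// old.cap == 4, and num == 5, the line below shrinks num to 4 so that the
	// spare element old[3:4] counts toward the requested growth instead of
	// being added on top of it.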
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
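//
// For illustration, source-level forms that can reach this helper (a sketch;
// whether a given copy is lowered to a call here or inlined is up to the
// compiler) include
//
//	n := copy(dst, src)     // dst, src are []T for a pointer-free T
//	n := copy(buf, "hello") // copying from a string into a []byte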
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}

//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
}

// moveSlice copies the input slice to the heap and returns it.
// et is the element type of the slice.
func moveSlice(et *_type, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if old != nil {
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	capmem := uintptr(cap) * et.Size_
	new := mallocgc(capmem, et, true)
	bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), capmem, et)
	memmove(new, old, capmem)
	return new, len, cap
}

// moveSliceNoScan is like moveSlice except the element type is known to
// not have any pointers. We instead pass in the size of the element.
func moveSliceNoScan(elemSize uintptr, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if old != nil {
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	capmem := uintptr(cap) * elemSize
	new := mallocgc(capmem, nil, false)
	memmove(new, old, capmem)
	return new, len, cap
}

// moveSliceNoCap is like moveSlice, but can pick any appropriate capacity
// for the returned slice.
// Elements between len and cap in the returned slice will be zeroed.
func moveSliceNoCap(et *_type, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if old != nil {
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	lenmem := uintptr(len) * et.Size_
	capmem := roundupsize(lenmem, false)
	new := mallocgc(capmem, et, true)
	bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), lenmem, et)
	memmove(new, old, lenmem)
	return new, len, int(capmem / et.Size_)
}

// moveSliceNoCapNoScan is a combination of moveSliceNoScan and moveSliceNoCap.
func moveSliceNoCapNoScan(elemSize uintptr, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if old != nil {
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	lenmem := uintptr(len) * elemSize
	capmem := roundupsize(lenmem, true)
	new := mallocgc(capmem, nil, false)
	memmove(new, old, lenmem)
	if capmem > lenmem {
		memclrNoHeapPointers(add(new, lenmem), capmem-lenmem)
	}
	return new, len, int(capmem / elemSize)
}

// growsliceBuf is like growslice, but we can use the given buffer
// as a backing store if we want. bufPtr must be on the stack.
func growsliceBuf(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufLen int) slice {
	if newLen > bufLen {
		// Doesn't fit, process like a normal growslice.
		return growslice(oldPtr, newLen, oldCap, num, et)
	}
	oldLen := newLen - num
	if oldPtr != bufPtr && oldLen != 0 {
		// Move data to start of buffer.
		// Note: bufPtr is on the stack, so no write barrier needed.
		memmove(bufPtr, oldPtr, uintptr(oldLen)*et.Size_)
	}
	// Pick a new capacity.
	//
	// Unlike growslice, we don't need to double the size each time.
	// The work done here is not proportional to the length of the slice.
	// (Unless the memmove happens above, but that is rare, and in any
	// case there are not many elements on this path.)
	//
	// Instead, we try to just bump up to the next size class.
	// This will ensure that we don't waste any space when we eventually
	// call moveSlice with the resulting slice.
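	//
	// For example (size classes here are illustrative of the usual 64-bit
	// classes): with 8-byte elements and newLen == 5, 40 bytes rounds up to
	// the 48-byte size class, so newCap becomes 6.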
	newCap := int(roundupsize(uintptr(newLen)*et.Size_, !et.Pointers()) / et.Size_)

	// Zero slice beyond newLen.
	// The buffer is stack memory, so NoHeapPointers is ok.
	// Caller will overwrite [oldLen:newLen], so we don't need to zero that portion.
	// If et.Pointers(), buffer is at least initialized so we don't need to
	// worry about the caller overwriting junk in [oldLen:newLen].
	if newLen < newCap {
		memclrNoHeapPointers(add(bufPtr, uintptr(newLen)*et.Size_), uintptr(newCap-newLen)*et.Size_)
	}

	return slice{bufPtr, newLen, newCap}
}

// growsliceBufNoAlias is a combination of growsliceBuf and growsliceNoAlias.
// bufPtr must be on the stack.
func growsliceBufNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufLen int) slice {
	s := growsliceBuf(oldPtr, newLen, oldCap, num, et, bufPtr, bufLen)
	if goexperiment.RuntimeFreegc && oldPtr != bufPtr && oldPtr != nil && oldPtr != s.array {
		// oldPtr is not bufPtr (the stack buffer) and it is not
		// the data pointer for s after the grow, so attempt to free it.
		// (Note that freegc does a broader check that oldPtr does not point into our stack,
		// but checking here first is slightly cheaper for a common case when oldPtr is bufPtr
		// and freegc would be a no-op.)
		//
		// TODO(thepudds): see related TODO in growsliceNoAlias about possibly eliminating
		// the oldPtr != s.array check.
		noscan := !et.Pointers()
		freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
	}
	return s
}
