// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scan

import (
	"internal/goarch"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

// ScanSpanPackedGo is an optimized pure Go implementation of ScanSpanPacked.
func ScanSpanPackedGo(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
	buf := newUnsafeBuf(bufp)
	objBytes := uintptr(gc.SizeClassToSize[sizeClass])
	// TODO(austin): Trim objMarks to the number of objects in this size class?
	for wordI, word := range objMarks {
		for range sys.OnesCount64(uint64(word)) {
			bitI := sys.TrailingZeros64(uint64(word))
			word &^= 1 << bitI

			objI := wordI*goarch.PtrBits + bitI

			// objStartInSpan is the index of the word from mem where the
			// object starts. objEndInSpan points to the next object, i.e.
			// it's an exclusive upper bound.
			objStartInSpan := objBytes * uintptr(objI) / goarch.PtrSize
			objEndInSpan := objStartInSpan + objBytes/goarch.PtrSize

			// TODO: Another way to do this would be to extract the pointer mask
			// for this object (it's at most 64 bits) and do a bit iteration
			// over that.
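			//
			// A rough sketch of that alternative (hypothetical, untested),
			// assuming the object's pointer bits fit in a single 64-bit word
			// and that a helper like extractBits exists to pull them out of
			// ptrMask (nil filtering elided for brevity):
			//
			//	m := extractBits(ptrMask, objStartInSpan, objEndInSpan) // hypothetical helper
			//	for range sys.OnesCount64(m) {
			//		j := sys.TrailingZeros64(m)
			//		m &^= 1 << j
			//		buf.addIf(*(*uintptr)(unsafe.Add(mem, (objStartInSpan+uintptr(j))*goarch.PtrSize)), true)
			//	}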

			for i := objStartInSpan; i < objEndInSpan; i++ {
				v := *(*uintptr)(unsafe.Add(mem, i*goarch.PtrSize))
				// Check if we should enqueue this word.
				//
				// We load the word before the check because, even though this
				// can lead to loading much more than necessary, it's faster.
				// Most likely this is because it warms up the hardware
				// prefetcher much better, and gives us more time before we need
				// the value.
				//
				// We discard values that can't possibly be useful pointers
				// here, too, because this filters out a lot of words and does
				// so with as little processing as possible.
				//
				// TODO: This is close to, but not entirely branchless.
				isPtr := bool2int(ptrMask[i/goarch.PtrBits]&(1<<(i%goarch.PtrBits)) != 0)
				// Values below 4096 can't be valid pointers: the first page
				// is never mapped (see minLegalPointer in the runtime).
				isNonNil := bool2int(v >= 4096)
				ok := isPtr&isNonNil != 0
				buf.addIf(v, ok)
			}
		}
	}
	// We don't know the true size of bufp, but we can at least catch obvious errors
	// in this function by making sure we didn't write more than gc.PageWords pointers
	// into the buffer.
	buf.check(gc.PageWords)
	return int32(buf.n)
}
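
// A minimal sketch of a call site (hypothetical, for illustration only): the
// caller must supply a buffer with room for at least gc.PageWords pointers,
// since addIf's speculative stores may touch one slot past the last accepted
// entry.
//
//	var buf [gc.PageWords]uintptr
//	n := ScanSpanPackedGo(spanBase, &buf[0], marks, sizeClass, ptrs)
//	process(buf[:n]) // hypothetical consumer of the collected pointers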

// unsafeBuf allows for appending to a buffer without bounds-checks or branches.
type unsafeBuf[T any] struct {
	base *T
	n    int
}

func newUnsafeBuf[T any](base *T) unsafeBuf[T] {
	return unsafeBuf[T]{base, 0}
}

// addIf appends a value to the buffer if the predicate is true.
//
// addIf speculatively writes to the next index of the buffer, so the caller
// must be certain that such a write will still be in-bounds with respect
// to the buffer's true capacity.
func (b *unsafeBuf[T]) addIf(val T, pred bool) {
	*(*T)(unsafe.Add(unsafe.Pointer(b.base), b.n*int(unsafe.Sizeof(val)))) = val
	b.n += bool2int(pred)
}

// check performs a bounds check on speculative writes into the buffer.
// Calling this shortly after a series of addIf calls is important to
// catch any misuse as fast as possible. Separating the bounds check from
// the append is more efficient, but one check to cover several appends is
// still efficient and much more memory safe.
func (b unsafeBuf[T]) check(cap int) {
	// We fail even if b.n == cap because addIf speculatively writes one past b.n.
	if b.n >= cap {
		panic("unsafeBuf overflow")
	}
}
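
// A minimal usage sketch of unsafeBuf (illustrative only, not used by the
// runtime): append into a fixed-size scratch array, then validate all the
// speculative writes with a single check.
//
//	var scratch [64]int
//	b := newUnsafeBuf(&scratch[0])
//	for _, v := range input {
//		b.addIf(v, v > 0) // always stores; only advances b.n when v > 0
//	}
//	b.check(len(scratch)) // panics if the appends may have overrun scratch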

func bool2int(b bool) int {
	// This particular pattern gets optimized by the compiler.
	var i int
	if b {
		i = 1
	}
	return i
}