// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"errors"
	"fmt"
	"io"
	"io/fs"
	"maps"
	"path"
	"slices"
	"strings"
	"time"
)

// Writer provides sequential writing of a tar archive.
// [Writer.WriteHeader] begins a new file with the provided [Header],
// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
	w    io.Writer
	pad  int64      // Amount of padding to write after current file entry
	curr fileWriter // Writer for current file entry
	hdr  Header     // Shallow copy of Header that is safe for mutations
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// Only the exported methods of Writer are responsible for ensuring
	// that this error is sticky.
	err error
}

// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
	return &Writer{w: w, curr: &regFileWriter{w, 0}}
}
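
// A minimal usage sketch from a caller's point of view; bytes.Buffer and the
// error handling are illustrative, not prescribed:
//
//	var buf bytes.Buffer
//	tw := tar.NewWriter(&buf)
//	hdr := &tar.Header{Name: "hello.txt", Mode: 0600, Size: 5}
//	if err := tw.WriteHeader(hdr); err != nil {
//		log.Fatal(err)
//	}
//	if _, err := tw.Write([]byte("hello")); err != nil {
//		log.Fatal(err)
//	}
//	if err := tw.Close(); err != nil {
//		log.Fatal(err)
//	}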

type fileWriter interface {
	io.Writer
	fileState

	ReadFrom(io.Reader) (int64, error)
}

// Flush finishes writing the current file's block padding.
// The current file must be fully written before Flush can be called.
//
// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
	if tw.err != nil {
		return tw.err
	}
	if nb := tw.curr.logicalRemaining(); nb > 0 {
		return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
	}
	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
		return tw.err
	}
	tw.pad = 0
	return nil
}
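
// For example, a 600-byte entry occupies two 512-byte archive blocks, so the
// padding flushed here (or implicitly by the next WriteHeader or Close) is
// 1024-600 = 424 zero bytes.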

// WriteHeader writes hdr and prepares to accept the file's contents.
// The Header.Size determines how many bytes can be written for the next file.
// If the current file is not fully written, then this returns an error.
// This implicitly flushes any padding necessary before writing the header.
func (tw *Writer) WriteHeader(hdr *Header) error {
	if err := tw.Flush(); err != nil {
		return err
	}
	tw.hdr = *hdr // Shallow copy of Header

	// Avoid usage of the legacy TypeRegA flag, and automatically promote
	// it to use TypeReg or TypeDir.
	if tw.hdr.Typeflag == TypeRegA {
		if strings.HasSuffix(tw.hdr.Name, "/") {
			tw.hdr.Typeflag = TypeDir
		} else {
			tw.hdr.Typeflag = TypeReg
		}
	}

	// Round ModTime and ignore AccessTime and ChangeTime unless
	// the format is explicitly chosen.
	// This ensures nominal usage of WriteHeader (without specifying the format)
	// does not always result in the PAX format being chosen, which
	// causes a 1KiB increase to every header.
	if tw.hdr.Format == FormatUnknown {
		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
		tw.hdr.AccessTime = time.Time{}
		tw.hdr.ChangeTime = time.Time{}
	}

	fmts, paxHdrs, err := tw.hdr.allowedFormats()
	switch {
	case fmts.has(FormatUSTAR):
		tw.err = tw.writeUSTARHeader(&tw.hdr)
		return tw.err
	case fmts.has(FormatPAX):
		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
		return tw.err
	case fmts.has(FormatGNU):
		tw.err = tw.writeGNUHeader(&tw.hdr)
		return tw.err
	default:
		return err // Non-fatal error
	}
}
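
// A sketch of opting out of the rounding above by choosing the format
// explicitly; the field values other than Format are illustrative:
//
//	hdr := &tar.Header{
//		Name:       "file.txt",
//		Size:       5,
//		ModTime:    time.Now(), // kept at full precision under PAX
//		AccessTime: time.Now(), // preserved only when Format is set
//		Format:     tar.FormatPAX,
//	}
//	err := tw.WriteHeader(hdr)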

func (tw *Writer) writeUSTARHeader(hdr *Header) error {
	// Check if we can use USTAR prefix/suffix splitting.
	var namePrefix string
	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
		namePrefix, hdr.Name = prefix, suffix
	}

	// Pack the main header.
	var f formatter
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
	f.formatString(blk.toUSTAR().prefix(), namePrefix)
	blk.setFormat(FormatUSTAR)
	if f.err != nil {
		return f.err // Should never happen since header is validated
	}
	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
}

func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
	realName, realSize := hdr.Name, hdr.Size

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Handle sparse files.
		var spd sparseDatas
		var spb []byte
		if len(hdr.SparseHoles) > 0 {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			hdr.Size = 0 // Replace with encoded size
			spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
			for _, s := range spd {
				hdr.Size += s.Length
				spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
				spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
			}
			pad := blockPadding(int64(len(spb)))
			spb = append(spb, zeroBlock[:pad]...)
			hdr.Size += int64(len(spb)) // Accounts for encoded sparse map

			// Add and modify appropriate PAX records.
			dir, file := path.Split(realName)
			hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
			paxHdrs[paxGNUSparseMajor] = "1"
			paxHdrs[paxGNUSparseMinor] = "0"
			paxHdrs[paxGNUSparseName] = realName
			paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
			paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
			delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
		}
	*/
	_ = realSize

	// Write PAX records to the output.
	isGlobal := hdr.Typeflag == TypeXGlobalHeader
	if len(paxHdrs) > 0 || isGlobal {
		// Write each record to a buffer.
		var buf strings.Builder
		// Sort keys for deterministic ordering.
		for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
			rec, err := formatPAXRecord(k, paxHdrs[k])
			if err != nil {
				return err
			}
			buf.WriteString(rec)
		}

		// Write the extended header file.
		var name string
		var flag byte
		if isGlobal {
			name = realName
			if name == "" {
				name = "GlobalHead.0.0"
			}
			flag = TypeXGlobalHeader
		} else {
			dir, file := path.Split(realName)
			name = path.Join(dir, "PaxHeaders.0", file)
			flag = TypeXHeader
		}
		data := buf.String()
		if len(data) > maxSpecialFileSize {
			return ErrFieldTooLong
		}
		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
			return err // Global headers return here
		}
	}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
	blk.setFormat(FormatPAX)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Write the sparse map and setup the sparse writer if necessary.
		if len(spd) > 0 {
			// Use tw.curr since the sparse map is accounted for in hdr.Size.
			if _, err := tw.curr.Write(spb); err != nil {
				return err
			}
			tw.curr = &sparseFileWriter{tw.curr, spd, 0}
		}
	*/
	return nil
}

func (tw *Writer) writeGNUHeader(hdr *Header) error {
	// Use long-link files if Name or Linkname exceeds the field size.
	const longName = "././@LongLink"
	if len(hdr.Name) > nameSize {
		data := hdr.Name + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
			return err
		}
	}
	if len(hdr.Linkname) > nameSize {
		data := hdr.Linkname + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
			return err
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	var spd sparseDatas
	var spb []byte
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
	if !hdr.AccessTime.IsZero() {
		f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
	}
	if !hdr.ChangeTime.IsZero() {
		f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
	}
	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		if hdr.Typeflag == TypeGNUSparse {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
				for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
					f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
					f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
					sp = sp[1:]
				}
				if len(sp) > 0 {
					sa.IsExtended()[0] = 1
				}
				return sp
			}
			sp2 := formatSPD(spd, blk.GNU().Sparse())
			for len(sp2) > 0 {
				var spHdr block
				sp2 = formatSPD(sp2, spHdr.Sparse())
				spb = append(spb, spHdr[:]...)
			}

			// Update size fields in the header block.
			realSize := hdr.Size
			hdr.Size = 0 // Encoded size; does not account for encoded sparse map
			for _, s := range spd {
				hdr.Size += s.Length
			}
			copy(blk.V7().Size(), zeroBlock[:]) // Reset field
			f.formatNumeric(blk.V7().Size(), hdr.Size)
			f.formatNumeric(blk.GNU().RealSize(), realSize)
		}
	*/
	blk.setFormat(FormatGNU)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// Write the extended sparse map and setup the sparse writer if necessary.
	if len(spd) > 0 {
		// Use tw.w since the sparse map is not accounted for in hdr.Size.
		if _, err := tw.w.Write(spb); err != nil {
			return err
		}
		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
	}
	return nil
}

type (
	stringFormatter func([]byte, string)
	numberFormatter func([]byte, int64)
)

// templateV7Plus fills out the V7 fields of a block using values from hdr.
// It also fills out fields (uname, gname, devmajor, devminor) that are
// shared in the USTAR, PAX, and GNU formats using the provided formatters.
//
// The block returned is only valid until the next call to
// templateV7Plus or writeRawFile.
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
	tw.blk.reset()

	modTime := hdr.ModTime
	if modTime.IsZero() {
		modTime = time.Unix(0, 0)
	}

	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = hdr.Typeflag
	fmtStr(v7.name(), hdr.Name)
	fmtStr(v7.linkName(), hdr.Linkname)
	fmtNum(v7.mode(), hdr.Mode)
	fmtNum(v7.uid(), int64(hdr.Uid))
	fmtNum(v7.gid(), int64(hdr.Gid))
	fmtNum(v7.size(), hdr.Size)
	fmtNum(v7.modTime(), modTime.Unix())

	ustar := tw.blk.toUSTAR()
	fmtStr(ustar.userName(), hdr.Uname)
	fmtStr(ustar.groupName(), hdr.Gname)
	fmtNum(ustar.devMajor(), hdr.Devmajor)
	fmtNum(ustar.devMinor(), hdr.Devminor)

	return &tw.blk
}

// writeRawFile writes a minimal file with the given name and flag type.
// It uses format to encode the header format and will write data as the body.
// It uses default values for all of the other fields (as BSD and GNU tar do).
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
	tw.blk.reset()

	// Best effort for the filename.
	name = toASCII(name)
	if len(name) > nameSize {
		name = name[:nameSize]
	}
	name = strings.TrimRight(name, "/")

	var f formatter
	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = flag
	f.formatString(v7.name(), name)
	f.formatOctal(v7.mode(), 0)
	f.formatOctal(v7.uid(), 0)
	f.formatOctal(v7.gid(), 0)
	f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
	f.formatOctal(v7.modTime(), 0)
	tw.blk.setFormat(format)
	if f.err != nil {
		return f.err // Only occurs if size condition is violated
	}

	// Write the header and data.
	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
		return err
	}
	_, err := io.WriteString(tw, data)
	return err
}

// writeRawHeader writes blk as-is, without validating its contents.
// It sets up the Writer such that it can accept a file of the given size.
// If the flag is a special header-only flag, then the size is treated as zero.
func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
	if err := tw.Flush(); err != nil {
		return err
	}
	if _, err := tw.w.Write(blk[:]); err != nil {
		return err
	}
	if isHeaderOnlyType(flag) {
		size = 0
	}
	tw.curr = &regFileWriter{tw.w, size}
	tw.pad = blockPadding(size)
	return nil
}
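
// For instance, a symlink entry (TypeSymlink) is header-only, so size is forced
// to zero and pad stays 0 even if the caller set Header.Size; a 1-byte regular
// file leaves pad == 511 so the next header still starts on a block boundary.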

// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem,
// adding each file to the tar archive while maintaining the directory structure.
func (tw *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if name == "." {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
		if !info.IsDir() && !info.Mode().IsRegular() {
			return errors.New("tar: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		h.Name = name
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}
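
// A usage sketch; fstest.MapFS is only one convenient fs.FS implementation:
//
//	fsys := fstest.MapFS{
//		"dir/hello.txt": &fstest.MapFile{Data: []byte("hello")},
//	}
//	if err := tw.AddFS(fsys); err != nil {
//		log.Fatal(err)
//	}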

// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length <= nameSize || !isASCII(name) {
		return "", "", false
	} else if length > prefixSize+1 {
		length = prefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}

	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1 // nlen is length of suffix
	plen := i                 // plen is length of prefix
	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}
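
// Worked example: for an ASCII name of 121 bytes, say
// strings.Repeat("a", 80) + "/" + strings.Repeat("b", 40), the split happens at
// the last '/' within the first prefixSize+1 bytes, giving an 80-byte prefix
// and a 40-byte suffix, both within the 155- and 100-byte field limits.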

// Write writes to the current file in the tar archive.
// Write returns the error [ErrWriteTooLong] if more than
// Header.Size bytes are written after [Writer.WriteHeader].
//
// Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
// of what the [Header.Size] claims.
func (tw *Writer) Write(b []byte) (int, error) {
	if tw.err != nil {
		return 0, tw.err
	}
	n, err := tw.curr.Write(b)
	if err != nil && err != ErrWriteTooLong {
		tw.err = err
	}
	return n, err
}
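
// A sketch of the overflow behavior; the header fields are illustrative:
//
//	tw.WriteHeader(&tar.Header{Name: "f.txt", Size: 5})
//	n, err := tw.Write([]byte("hello!"))
//	// n == 5, err == tar.ErrWriteTooLong; the excess byte is dropped.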

// readFrom populates the content of the current file by reading from r.
// The bytes read must match the number of remaining bytes in the current file.
//
// If the current file is sparse and r is an io.ReadSeeker,
// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are all NULs.
// This always reads the last byte to ensure r is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tw *Writer) readFrom(r io.Reader) (int64, error) {
	if tw.err != nil {
		return 0, tw.err
	}
	n, err := tw.curr.ReadFrom(r)
	if err != nil && err != ErrWriteTooLong {
		tw.err = err
	}
	return n, err
}

// Close closes the tar archive by flushing the padding, and writing the footer.
// If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
// then this returns an error.
func (tw *Writer) Close() error {
	if tw.err == ErrWriteAfterClose {
		return nil
	}
	if tw.err != nil {
		return tw.err
	}

	// Trailer: two zero blocks.
	err := tw.Flush()
	for i := 0; i < 2 && err == nil; i++ {
		_, err = tw.w.Write(zeroBlock[:])
	}

	// Ensure all future actions are invalid.
	tw.err = ErrWriteAfterClose
	return err // Report IO errors
}
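
// As a point of reference, closing a Writer with no entries yields an archive
// consisting of exactly the two-block trailer:
//
//	var buf bytes.Buffer
//	tar.NewWriter(&buf).Close()
//	// buf.Len() == 1024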

// regFileWriter is a fileWriter for writing data to a regular file entry.
type regFileWriter struct {
	w  io.Writer // Underlying Writer
	nb int64     // Number of remaining bytes to write
}

func (fw *regFileWriter) Write(b []byte) (n int, err error) {
	overwrite := int64(len(b)) > fw.nb
	if overwrite {
		b = b[:fw.nb]
	}
	if len(b) > 0 {
		n, err = fw.w.Write(b)
		fw.nb -= int64(n)
	}
	switch {
	case err != nil:
		return n, err
	case overwrite:
		return n, ErrWriteTooLong
	default:
		return n, nil
	}
}

func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
	// Wrapping fw hides its ReadFrom method so io.Copy does not recurse into it.
	return io.Copy(struct{ io.Writer }{fw}, r)
}

// logicalRemaining implements fileState.logicalRemaining.
func (fw regFileWriter) logicalRemaining() int64 {
	return fw.nb
}

// physicalRemaining implements fileState.physicalRemaining.
func (fw regFileWriter) physicalRemaining() int64 {
	return fw.nb
}

// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
type sparseFileWriter struct {
	fw  fileWriter  // Underlying fileWriter
	sp  sparseDatas // Normalized list of data fragments
	pos int64       // Current position in sparse file
}

func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
	overwrite := int64(len(b)) > sw.logicalRemaining()
	if overwrite {
		b = b[:sw.logicalRemaining()]
	}

	b0 := b
	endPos := sw.pos + int64(len(b))
	for endPos > sw.pos && err == nil {
		var nf int // Bytes written in fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			bf := b[:min(int64(len(b)), dataStart-sw.pos)]
			nf, err = zeroWriter{}.Write(bf)
		} else { // In a data fragment
			bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
			nf, err = sw.fw.Write(bf)
		}
		b = b[nf:]
		sw.pos += int64(nf)
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	case overwrite:
		return n, ErrWriteTooLong
	default:
		return n, nil
	}
}
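
// For illustration, assuming sp holds the single data fragment {Offset: 2, Length: 3}
// and pos is 0: a Write of 5 bytes routes bytes 0-1 to zeroWriter (they must be
// NUL, otherwise errWriteHole) and bytes 2-4 to fw, the underlying data writer.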

func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
	rs, ok := r.(io.ReadSeeker)
	if ok {
		if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		return io.Copy(struct{ io.Writer }{sw}, r)
	}

	var readLastByte bool
	pos0 := sw.pos
	for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
		var nf int64 // Size of fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			nf = dataStart - sw.pos
			if sw.physicalRemaining() == 0 {
				readLastByte = true
				nf--
			}
			_, err = rs.Seek(nf, io.SeekCurrent)
		} else { // In a data fragment
			nf = dataEnd - sw.pos
			nf, err = io.CopyN(sw.fw, rs, nf)
		}
		sw.pos += nf
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// read a single byte to ensure the file is the right size.
	if readLastByte && err == nil {
		_, err = mustReadFull(r, []byte{0})
		sw.pos++
	}

	n = sw.pos - pos0
	switch {
	case err == io.EOF:
		return n, io.ErrUnexpectedEOF
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	default:
		return n, ensureEOF(rs)
	}
}

// logicalRemaining implements fileState.logicalRemaining.
func (sw sparseFileWriter) logicalRemaining() int64 {
	return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
}

// physicalRemaining implements fileState.physicalRemaining.
func (sw sparseFileWriter) physicalRemaining() int64 {
	return sw.fw.physicalRemaining()
}

// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
type zeroWriter struct{}

func (zeroWriter) Write(b []byte) (int, error) {
	for i, c := range b {
		if c != 0 {
			return i, errWriteHole
		}
	}
	return len(b), nil
}

// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
func ensureEOF(r io.Reader) error {
	n, err := tryReadFull(r, []byte{0})
	switch {
	case n > 0:
		return ErrWriteTooLong
	case err == io.EOF:
		return nil
	default:
		return err
	}
}