package zstd
import (
"io"
)
// debug, when true, enables per-sequence println tracing in execSeqs.
const debug = false
// compressedBlock reads, decompresses, and executes a single compressed
// block of blockSize bytes from r.r, appending the uncompressed data to
// r.buffer. It returns an error if the block is malformed or truncated.
func (r *Reader) compressedBlock(blockSize int) error {
	// Size the scratch buffer to exactly blockSize, reusing any
	// capacity it already has and growing it only when necessary.
	have := len(r.compressedBuf)
	if have >= blockSize {
		r.compressedBuf = r.compressedBuf[:blockSize]
	} else {
		r.compressedBuf = append(r.compressedBuf, make([]byte, blockSize-have)...)
	}

	if _, err := io.ReadFull(r.r, r.compressedBuf); err != nil {
		return r.wrapNonEOFError(0, err)
	}

	data := block(r.compressedBuf)
	r.buffer = r.buffer[:0]

	// The literals section comes first in the block.
	off, litbuf, err := r.readLiterals(data, 0, r.literals[:0])
	if err != nil {
		return err
	}
	r.literals = litbuf

	// Then the sequences section header and its FSE tables.
	seqCount, off, err := r.initSeqs(data, off)
	if err != nil {
		return err
	}

	if seqCount == 0 {
		// With no sequences the block's output is just the literals.
		if off < len(data) {
			return r.makeError(off, "extraneous data after no sequences")
		}
		r.buffer = append(r.buffer, litbuf...)
		return nil
	}

	return r.execSeqs(data, off, litbuf, seqCount)
}
// seqCode identifies one of the three kinds of sequence values decoded
// from a block: literal lengths, offsets, and match lengths. It indexes
// seqCodeInfo, r.seqTables, r.seqTableBits, and r.seqTableBuffers.
type seqCode int

const (
	seqLiteral seqCode = iota
	seqOffset
	seqMatch
)
// seqCodeInfoData describes how to decode one kind of sequence value
// (literal length, offset, or match length).
type seqCodeInfoData struct {
	// predefTable is the predefined FSE baseline table, used when a
	// block selects predefined mode (mode 0) in setSeqTable.
	predefTable []fseBaselineEntry
	// predefTableBits is the bit width used to read states from
	// predefTable (stored into r.seqTableBits for mode 0).
	predefTableBits int
	// maxSym is the maximum symbol value allowed in a block-defined
	// FSE table; passed through to readFSE.
	maxSym int
	// maxBits is the maximum log2 table size; buffers of 1<<maxBits
	// entries are allocated from it.
	maxBits int
	// toBaseline converts a plain FSE table into an FSE baseline
	// table for this kind of value.
	toBaseline func(*Reader, int, []fseEntry, []fseBaselineEntry) error
}
// seqCodeInfo holds the decoding parameters for each kind of sequence
// value, indexed by seqCode.
var seqCodeInfo = [3]seqCodeInfoData{
	seqLiteral: {
		predefTable:     predefinedLiteralTable[:],
		predefTableBits: 6,
		maxSym:          35,
		maxBits:         9,
		toBaseline:      (*Reader).makeLiteralBaselineFSE,
	},
	seqOffset: {
		predefTable:     predefinedOffsetTable[:],
		predefTableBits: 5,
		maxSym:          31,
		maxBits:         8,
		toBaseline:      (*Reader).makeOffsetBaselineFSE,
	},
	seqMatch: {
		predefTable:     predefinedMatchTable[:],
		predefTableBits: 6,
		maxSym:          52,
		maxBits:         9,
		toBaseline:      (*Reader).makeMatchBaselineFSE,
	},
}
// initSeqs parses the sequences section header at data[off:]: the
// sequence count followed by the symbol compression modes byte, after
// which it installs the three FSE tables via setSeqTable. It returns
// the sequence count and the offset just past the table definitions.
// See the Zstandard format (RFC 8878), Sequences_Section_Header.
func (r *Reader) initSeqs(data block, off int) (int, int, error) {
	if off >= len(data) {
		return 0, 0, r.makeEOFError(off)
	}

	hdr := data[off]
	off++
	if hdr == 0 {
		// A zero header byte means no sequences at all.
		return 0, off, nil
	}

	var seqCount int
	switch {
	case hdr < 128:
		// One-byte count.
		seqCount = int(hdr)
	case hdr < 255:
		// Two-byte count.
		if off >= len(data) {
			return 0, 0, r.makeEOFError(off)
		}
		seqCount = ((int(hdr) - 128) << 8) + int(data[off])
		off++
	default:
		// Header byte 255: two more bytes, little-endian, biased.
		if off+1 >= len(data) {
			return 0, 0, r.makeEOFError(off)
		}
		seqCount = int(data[off]) + (int(data[off+1]) << 8) + 0x7f00
		off += 2
	}

	if off >= len(data) {
		return 0, 0, r.makeEOFError(off)
	}
	modes := data[off]
	if modes&3 != 0 {
		// The low two bits of the modes byte are reserved.
		return 0, 0, r.makeError(off, "invalid symbol compression mode")
	}
	off++

	// Table definitions follow in this order: literal lengths,
	// offsets, match lengths.
	var err error
	if off, err = r.setSeqTable(data, off, seqLiteral, (modes>>6)&3); err != nil {
		return 0, 0, err
	}
	if off, err = r.setSeqTable(data, off, seqOffset, (modes>>4)&3); err != nil {
		return 0, 0, err
	}
	if off, err = r.setSeqTable(data, off, seqMatch, (modes>>2)&3); err != nil {
		return 0, 0, err
	}

	return seqCount, off, nil
}
// setSeqTable installs the FSE table for one kind of sequence value,
// according to the two-bit mode from the symbol compression modes byte:
// 0 = predefined table, 1 = RLE (single symbol), 2 = table defined in
// the block, 3 = repeat the previous block's table. It returns the
// offset just past any table data consumed.
func (r *Reader) setSeqTable(data block, off int, kind seqCode, mode byte) (int, error) {
	info := &seqCodeInfo[kind]
	switch mode {
	case 0:
		// Predefined mode: use the static table as-is.
		r.seqTables[kind] = info.predefTable
		r.seqTableBits[kind] = uint8(info.predefTableBits)
		return off, nil

	case 1:
		// RLE mode: one byte gives the single symbol; build a
		// one-entry table so every state decodes that symbol.
		if off >= len(data) {
			return 0, r.makeEOFError(off)
		}
		rle := data[off]
		off++
		entry := []fseEntry{
			{
				sym:  rle,
				bits: 0,
				base: 0,
			},
		}
		// Allocate the reusable per-kind buffer once, at the
		// maximum size it can ever need.
		if cap(r.seqTableBuffers[kind]) == 0 {
			r.seqTableBuffers[kind] = make([]fseBaselineEntry, 1<<info.maxBits)
		}
		r.seqTableBuffers[kind] = r.seqTableBuffers[kind][:1]
		if err := info.toBaseline(r, off, entry, r.seqTableBuffers[kind]); err != nil {
			return 0, err
		}
		r.seqTables[kind] = r.seqTableBuffers[kind]
		// Zero bits: the single state never consumes input bits.
		r.seqTableBits[kind] = 0
		return off, nil

	case 2:
		// FSE-compressed mode: read a table definition from the
		// block into a scratch table, then convert it to a
		// baseline table. Scratch is sized for the worst case,
		// then trimmed to the actual table size.
		if cap(r.fseScratch) < 1<<info.maxBits {
			r.fseScratch = make([]fseEntry, 1<<info.maxBits)
		}
		r.fseScratch = r.fseScratch[:1<<info.maxBits]
		tableBits, roff, err := r.readFSE(data, off, info.maxSym, info.maxBits, r.fseScratch)
		if err != nil {
			return 0, err
		}
		r.fseScratch = r.fseScratch[:1<<tableBits]
		if cap(r.seqTableBuffers[kind]) == 0 {
			r.seqTableBuffers[kind] = make([]fseBaselineEntry, 1<<info.maxBits)
		}
		r.seqTableBuffers[kind] = r.seqTableBuffers[kind][:1<<tableBits]
		if err := info.toBaseline(r, roff, r.fseScratch, r.seqTableBuffers[kind]); err != nil {
			return 0, err
		}
		r.seqTables[kind] = r.seqTableBuffers[kind]
		r.seqTableBits[kind] = uint8(tableBits)
		return roff, nil

	case 3:
		// Repeat mode: keep whatever table the previous block
		// installed; it is an error if there is none.
		if len(r.seqTables[kind]) == 0 {
			return 0, r.makeError(off, "missing repeat sequence FSE table")
		}
		return off, nil
	}
	// mode is a two-bit value, so 0-3 above are exhaustive.
	panic("unreachable")
}
// execSeqs decodes and executes seqCount sequences from the bitstream
// at the end of the block, consuming literals from litbuf and appending
// the reconstructed output to r.buffer. The bitstream is read backward
// from the end of data via a reverse bit reader.
func (r *Reader) execSeqs(data block, off int, litbuf []byte, seqCount int) error {
	rbr, err := r.makeReverseBitReader(data, len(data)-1, off)
	if err != nil {
		return err
	}

	// Read the three initial FSE states, in this fixed order.
	literalState, err := rbr.val(r.seqTableBits[seqLiteral])
	if err != nil {
		return err
	}

	offsetState, err := rbr.val(r.seqTableBits[seqOffset])
	if err != nil {
		return err
	}

	matchState, err := rbr.val(r.seqTableBits[seqMatch])
	if err != nil {
		return err
	}

	seq := 0
	for seq < seqCount {
		// Guard against output larger than the 128 KiB block limit.
		if len(r.buffer)+len(litbuf) > 128<<10 {
			return rbr.makeError("uncompressed size too big")
		}

		ptoffset := &r.seqTables[seqOffset][offsetState]
		ptmatch := &r.seqTables[seqMatch][matchState]
		ptliteral := &r.seqTables[seqLiteral][literalState]

		// Read the extra bits for each value: offset, then match
		// length, then literal length (required bitstream order).
		add, err := rbr.val(ptoffset.basebits)
		if err != nil {
			return err
		}
		offset := ptoffset.baseline + add

		add, err = rbr.val(ptmatch.basebits)
		if err != nil {
			return err
		}
		match := ptmatch.baseline + add

		add, err = rbr.val(ptliteral.basebits)
		if err != nil {
			return err
		}
		literal := ptliteral.baseline + add

		// Maintain the three-entry repeated-offset history.
		if ptoffset.basebits > 1 {
			// A directly coded offset pushes onto the history.
			r.repeatedOffset3 = r.repeatedOffset2
			r.repeatedOffset2 = r.repeatedOffset1
			r.repeatedOffset1 = offset
		} else {
			// Small offset values select a repeated offset;
			// a zero literal length shifts the encoding by one.
			if literal == 0 {
				offset++
			}
			switch offset {
			case 1:
				offset = r.repeatedOffset1
			case 2:
				offset = r.repeatedOffset2
				r.repeatedOffset2 = r.repeatedOffset1
				r.repeatedOffset1 = offset
			case 3:
				offset = r.repeatedOffset3
				r.repeatedOffset3 = r.repeatedOffset2
				r.repeatedOffset2 = r.repeatedOffset1
				r.repeatedOffset1 = offset
			case 4:
				offset = r.repeatedOffset1 - 1
				r.repeatedOffset3 = r.repeatedOffset2
				r.repeatedOffset2 = r.repeatedOffset1
				r.repeatedOffset1 = offset
			}
		}

		seq++
		if seq < seqCount {
			// Update the three states for the next sequence;
			// the last sequence skips this (no more bits).
			add, err = rbr.val(ptliteral.bits)
			if err != nil {
				return err
			}
			literalState = uint32(ptliteral.base) + add

			add, err = rbr.val(ptmatch.bits)
			if err != nil {
				return err
			}
			matchState = uint32(ptmatch.base) + add

			add, err = rbr.val(ptoffset.bits)
			if err != nil {
				return err
			}
			offsetState = uint32(ptoffset.base) + add
		}

		if debug {
			println("literal", literal, "offset", offset, "match", match)
		}

		// Copy literal bytes from litbuf, then the match from
		// earlier output.
		if literal > uint32(len(litbuf)) {
			return rbr.makeError("literal byte overflow")
		}
		if literal > 0 {
			r.buffer = append(r.buffer, litbuf[:literal]...)
			litbuf = litbuf[literal:]
		}

		if match > 0 {
			if err := r.copyFromWindow(&rbr, offset, match); err != nil {
				return err
			}
		}
	}

	// Any literals left after the last sequence are appended verbatim.
	r.buffer = append(r.buffer, litbuf...)

	// The bitstream must be fully consumed.
	if rbr.cnt != 0 {
		return r.makeError(off, "extraneous data after sequences")
	}

	return nil
}
// copyFromWindow copies match bytes into r.buffer from offset bytes
// back in the decoded output, reaching into r.window for data that
// precedes the current block's buffer.
func (r *Reader) copyFromWindow(rbr *reverseBitReader, offset, match uint32) error {
	if offset == 0 {
		return rbr.makeError("invalid zero offset")
	}

	bufferOffset := uint32(0)
	lenBlock := uint32(len(r.buffer))
	if lenBlock < offset {
		// The match starts before this block's output: copy the
		// window portion first. (Note: the local `copy` shadows
		// the builtin within this scope.)
		lenWindow := r.window.len()
		copy := offset - lenBlock
		if copy > lenWindow {
			return rbr.makeError("offset past window")
		}
		windowOffset := lenWindow - copy
		if copy > match {
			copy = match
		}
		r.buffer = r.window.appendTo(r.buffer, windowOffset, windowOffset+copy)
		match -= copy
		// Any remainder continues from the start of r.buffer
		// (bufferOffset stays 0).
	} else {
		bufferOffset = lenBlock - offset
	}

	// Copy in chunks: when offset < match the source overlaps the
	// destination, and each append makes more source bytes available
	// for the next iteration (this repeats the data, as required).
	for match > 0 {
		copy := uint32(len(r.buffer)) - bufferOffset
		if copy > match {
			copy = match
		}
		r.buffer = append(r.buffer, r.buffer[bufferOffset:bufferOffset+copy]...)
		match -= copy
	}
	return nil
}
The pages are generated with Golds v0.7.3 (GOOS=linux, GOARCH=amd64).
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.