Updated transitive dependencies
parent 72967395e1
commit 9101680c8b
go.mod (2 changes)

@@ -4,7 +4,7 @@ go 1.20
 
 require (
     github.com/h2non/filetype v1.1.3
-    github.com/klauspost/compress v1.16.0
+    github.com/klauspost/compress v1.16.3
     github.com/spf13/cobra v1.6.1
     github.com/yosssi/gohtml v0.0.0-20201013000340-ee4748c638f4
     golang.org/x/image v0.6.0
go.sum (4 changes)

@@ -4,8 +4,8 @@ github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy
 github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
-github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
+github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
 github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
@@ -16,6 +16,21 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+    * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+    * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+    * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+    * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+    * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+    * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+    * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+    * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+    * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+    * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+    * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
 * Jan 21st, 2023 (v1.15.15)
     * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
     * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
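Note: the first v1.16.1 bullet ("zstd: Speed up + improve best encoder") is what the encoder hunks further down implement. For orientation, a minimal, hypothetical usage sketch of the vendored klauspost/compress/zstd API showing how a caller opts into that best-compression path; the payload and sizes are made up and not part of this commit.

package main

import (
    "bytes"
    "fmt"

    "github.com/klauspost/compress/zstd"
)

func main() {
    // SpeedBestCompression selects the slowest, strongest encoder level,
    // i.e. the code path touched by the matchAt/improve changes below.
    enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
    if err != nil {
        panic(err)
    }
    defer enc.Close()

    src := bytes.Repeat([]byte("a moderately repetitive payload "), 512)
    dst := enc.EncodeAll(src, nil) // one-shot compression; nil dst lets the encoder allocate
    fmt.Printf("compressed %d -> %d bytes\n", len(src), len(dst))
}

Passing nil to NewWriter is fine when only EncodeAll is used; the stream-oriented Write path is not exercised here.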
@@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
 // If the buffer is over-read an error is returned.
 func (s *Scratch) decompress() error {
     br := &s.bits
-    br.init(s.br.unread())
+    if err := br.init(s.br.unread()); err != nil {
+        return err
+    }
 
     var s1, s2 decoder
     // Initialize and decode first state and symbol.
@@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
     b.nBits += encA.nBits + encB.nBits
 }
 
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+    bitsA := encA.nBits
+    bitsB := bitsA + encB.nBits
+    bitsC := bitsB + encC.nBits
+    bitsD := bitsC + encD.nBits
+    combined := uint64(encA.val) |
+        (uint64(encB.val) << (bitsA & 63)) |
+        (uint64(encC.val) << (bitsB & 63)) |
+        (uint64(encD.val) << (bitsC & 63))
+    b.bitContainer |= combined << (b.nBits & 63)
+    b.nBits += bitsD
+}
+
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
     if b.nBits < 32 {
@@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
             tmp := src[n : n+4]
             // tmp should be len 4
             bw.flush32()
-            bw.encTwoSymbols(cTable, tmp[3], tmp[2])
-            bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+            bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
         }
     } else {
         for ; n >= 0; n -= 4 {
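The two hunks above add encFourSymbols to what appears to be huff0's bitWriter and switch compress1xDo from two encTwoSymbols calls to a single encFourSymbols call, so the bit container and bit count are updated once per four symbols instead of twice. A self-contained sketch of the packing idea, with a hypothetical sym type standing in for cTableEntry and made-up code values:

package main

import "fmt"

// sym is a hypothetical stand-in for a Huffman table entry: a code value plus its length in bits.
type sym struct {
    val   uint16
    nBits uint8
}

func main() {
    // Four codes to be emitted LSB-first, mirroring how encFourSymbols pre-combines
    // them before OR-ing the result into b.bitContainer in one step.
    a, b, c, d := sym{0b101, 3}, sym{0b01, 2}, sym{0b1111, 4}, sym{0b0, 1}

    bitsA := a.nBits
    bitsB := bitsA + b.nBits
    bitsC := bitsB + c.nBits
    bitsD := bitsC + d.nBits
    // Each code is shifted by the total width of the codes before it; the &63
    // mirrors the original and keeps the shift amount provably below 64.
    combined := uint64(a.val) |
        uint64(b.val)<<(bitsA&63) |
        uint64(c.val)<<(bitsB&63) |
        uint64(d.val)<<(bitsC&63)

    fmt.Printf("packed %d bits: %b\n", bitsD, combined)
    // Reading the low bits back in the same order recovers each code.
    fmt.Println(
        combined&(1<<bitsA-1),          // a.val
        combined>>bitsA&(1<<b.nBits-1), // b.val
        combined>>bitsB&(1<<c.nBits-1), // c.val
        combined>>bitsC&(1<<d.nBits-1), // d.val
    )
}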
@@ -9,6 +9,7 @@ import (
     "encoding/binary"
     "errors"
     "fmt"
+    "hash/crc32"
     "io"
     "os"
     "path/filepath"
@@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
             }
         }
         var err error
+        if debugDecoder {
+            println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+        }
         huff, literals, err = huff0.ReadTable(literals, huff)
         if err != nil {
             println("reading huffman table:", err)
@@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
 func (b *byteBuf) readByte() (byte, error) {
     bb := *b
     if len(bb) < 1 {
-        return 0, nil
+        return 0, io.ErrUnexpectedEOF
     }
     r := bb[0]
     *b = bb[1:]
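The change above makes what appears to be zstd's byteBuf report io.ErrUnexpectedEOF when a byte is requested from an empty buffer, rather than silently returning 0. A minimal sketch of the corrected semantics using a hypothetical byteSlice type (not the vendored implementation):

package main

import (
    "fmt"
    "io"
)

// byteSlice is a hypothetical stand-in for an in-memory buffer that is
// consumed from the front, one byte at a time.
type byteSlice []byte

// readByte pops one byte and reports io.ErrUnexpectedEOF on truncated input,
// matching the fixed behaviour above, instead of handing back a zero byte.
func (b *byteSlice) readByte() (byte, error) {
    bb := *b
    if len(bb) < 1 {
        return 0, io.ErrUnexpectedEOF
    }
    r := bb[0]
    *b = bb[1:]
    return r, nil
}

func main() {
    buf := byteSlice{0x28, 0xb5}
    for {
        c, err := buf.readByte()
        if err != nil {
            fmt.Println("stop:", err) // stop: unexpected EOF
            return
        }
        fmt.Printf("read 0x%02x\n", c)
    }
}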
@@ -32,7 +32,6 @@ type match struct {
     length int32
     rep    int32
     est    int32
-    _      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }
 
 const highScore = 25000
@@ -189,12 +188,6 @@ encodeLoop:
             panic("offset0 was 0")
         }
 
-        bestOf := func(a, b *match) *match {
-            if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
-                return a
-            }
-            return b
-        }
         const goodEnough = 100
 
         nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
@@ -202,40 +195,41 @@ encodeLoop:
         candidateL := e.longTable[nextHashL]
         candidateS := e.table[nextHashS]
 
-        matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+        // Set m to a match at offset if it looks like that will improve compression.
+        improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
             if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-                return match{s: s, est: highScore}
+                return
             }
             if debugAsserts {
                 if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
                     panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
                 }
             }
-            m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-            m.estBits(bitsPerByte)
-            return m
+            cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+            cand.estBits(bitsPerByte)
+            if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+                *m = cand
+            }
         }
 
-        m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-        m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-        m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
-        m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
-        best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
+        best := match{s: s, est: highScore}
+        improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+        improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+        improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+        improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
 
         if canRepeat && best.length < goodEnough {
             cv32 := uint32(cv >> 8)
             spp := s + 1
-            m1 := matchAt(spp-offset1, spp, cv32, 1)
-            m2 := matchAt(spp-offset2, spp, cv32, 2)
-            m3 := matchAt(spp-offset3, spp, cv32, 3)
-            best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+            improve(&best, spp-offset1, spp, cv32, 1)
+            improve(&best, spp-offset2, spp, cv32, 2)
+            improve(&best, spp-offset3, spp, cv32, 3)
             if best.length > 0 {
                 cv32 = uint32(cv >> 24)
                 spp += 2
-                m1 := matchAt(spp-offset1, spp, cv32, 1)
-                m2 := matchAt(spp-offset2, spp, cv32, 2)
-                m3 := matchAt(spp-offset3, spp, cv32, 3)
-                best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+                improve(&best, spp-offset1, spp, cv32, 1)
+                improve(&best, spp-offset2, spp, cv32, 2)
+                improve(&best, spp-offset3, spp, cv32, 3)
             }
         }
         // Load next and check...
@@ -262,18 +256,16 @@ encodeLoop:
             candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
             // Short at s+1
-            m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
+            improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
             // Long at s+1, s+2
-            m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-            m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-            m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
-            m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
-            best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
+            improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+            improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+            improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
+            improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
             if false {
                 // Short at s+3.
                 // Too often worse...
-                m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
-                best = bestOf(best, &m)
+                improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
             }
             // See if we can find a better match by checking where the current best ends.
             // Use that offset to see if we can find a better full match.
@@ -284,13 +276,10 @@ encodeLoop:
             // For this compression level 2 yields the best results.
             const skipBeginning = 2
             if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
-                m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-                bestEnd := bestOf(best, &m)
+                improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                 if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
-                    m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-                    bestEnd = bestOf(bestEnd, &m)
+                    improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                 }
-                best = bestEnd
             }
         }
     }
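The encoder hunks above (apparently zstd's best-compression path) fold the old matchAt constructor and bestOf comparator into a single improve helper that updates the running best match in place instead of building and comparing temporary match values. The selection rule itself is unchanged: a candidate wins if its estimated match cost, plus the literal cost of the extra bytes it skips, is lower, or if no real match has been found yet. A standalone sketch of that rule with simplified stand-ins for the real match bookkeeping (field set and numbers are illustrative):

package main

import "fmt"

// match mirrors just the fields the comparison needs: where the candidate
// starts and its estimated encoding cost in bits.
type match struct {
    s   int32
    est int32
}

const highScore = 25000 // sentinel meaning "no usable match yet"

// improve replaces *best with cand when cand is cheaper once the literal cost
// of any extra skipped bytes is charged to it. bitsPerByte is an estimated
// literal cost in bits, scaled by 1024 (assumed to mirror the encoder's scaling,
// which is why the original expression shifts right by 10).
func improve(best *match, cand match, bitsPerByte int32) {
    if best.est >= highScore || cand.est-best.est+(cand.s-best.s)*bitsPerByte>>10 < 0 {
        *best = cand
    }
}

func main() {
    bitsPerByte := int32(6 << 10) // assume roughly 6 bits of literal cost per byte
    best := match{s: 100, est: highScore}

    improve(&best, match{s: 100, est: 40}, bitsPerByte) // first real candidate always wins
    improve(&best, match{s: 102, est: 20}, bitsPerByte) // 20-40 + 2*6 = -8 < 0: cheaper, wins
    improve(&best, match{s: 110, est: 35}, bitsPerByte) // 35-20 + 8*6 = 63 >= 0: rejected

    fmt.Printf("best: start=%d est=%d bits\n", best.s, best.est)
}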
@@ -314,9 +314,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
         }
         size := ll + ml + len(out)
         if size-startSize > maxBlockSize {
-            if size-startSize == 424242 {
-                panic("here")
-            }
             return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
         }
         if size > cap(out) {
@@ -427,8 +424,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
         }
     }
 
-    if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+    // Check if space for literals
+    if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
         return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
     }
 
@@ -148,7 +148,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
     s.seqSize += ctx.litRemain
     if s.seqSize > maxBlockSize {
         return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
     }
     err := br.close()
     if err != nil {
@@ -7,7 +7,7 @@ github.com/h2non/filetype/types
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
-# github.com/klauspost/compress v1.16.0
+# github.com/klauspost/compress v1.16.3
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse