Updated transitive dependencies

Seednode 2023-11-15 11:03:43 -06:00
parent aa6cb2aeef
commit 78a29b87fc
7 changed files with 49 additions and 32 deletions

go.mod (2 changes)

@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/alecthomas/chroma/v2 v2.11.1
 	github.com/julienschmidt/httprouter v1.3.0
-	github.com/klauspost/compress v1.17.2
+	github.com/klauspost/compress v1.17.3
 	github.com/spf13/cobra v1.8.0
 	github.com/yosssi/gohtml v0.0.0-20201013000340-ee4748c638f4
 	golang.org/x/image v0.14.0

go.sum (4 changes)

@@ -13,8 +13,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
+github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
 github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=

View File

@@ -16,6 +16,14 @@ This package provides various compression algorithms.
 # changelog
 
+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+	* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+	* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+	* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+	* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+
 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
 	* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
 	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
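
The headline item in the changelog entries above is the v1.17.2 fix for rare corrupted output from zstd's "best" compression level, which is also the encoder the zstd hunks further down touch. A minimal round-trip sketch of that mode using the package's public zstd API; the payload and the verify step are illustrative only, not part of this commit:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := bytes.Repeat([]byte("transitive dependency update "), 1024)

	// SpeedBestCompression selects the "best" encoder mentioned in the changelog.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		log.Fatal(err)
	}
	compressed := enc.EncodeAll(payload, nil)
	enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	roundTripped, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(payload, roundTripped)) // expect: true
}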

View File

@@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error {
 		previous0 bool
 		charnum   uint16
 
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
 
 		// Write Table Size
 		bitStream = uint32(tableLog - minTablelog)
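
The only change here widens the worst-case FSE header bound before the bits-to-bytes shift. A quick comparison of the two expressions for sample inputs; reading the extra `+ 4 + 2` as additional header bits is my interpretation of the diff, not something stated upstream:

package main

import "fmt"

func main() {
	// Hypothetical inputs; in the library these come from the Scratch state.
	symbolLen, tableLog := 255, 9

	oldBound := ((symbolLen * tableLog) >> 3) + 3       // bound before this update
	newBound := ((symbolLen*tableLog + 4 + 2) >> 3) + 3 // bound after this update
	fmt.Println(oldBound, newBound) // 289 290: the new bound can be one byte larger
}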

View File

@@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
 	if m.rep < 0 {
 		ofc = ofCode(uint32(m.s-m.offset) + 3)
 	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
 	}
 	// Cost, excluding
 	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -227,7 +227,7 @@ encodeLoop:
 				}
 			}
 			l := 4 + e.matchlen(s+4, offset+4, src)
-			if rep < 0 {
+			if true {
 				// Extend candidate match backwards as far as possible.
 				tMin := s - e.maxMatchOff
 				if tMin < 0 {
@@ -282,6 +282,7 @@ encodeLoop:
 		// Load next and check...
 		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
 		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1
 
 		// Look far ahead, unless we have a really long match already...
 		if best.length < goodEnough {
@@ -357,19 +358,16 @@ encodeLoop:
 			blk.sequences = append(blk.sequences, seq)
 
 			// Index old s + 1 -> s - 1
-			index0 := s + 1
 			s = best.s + best.length
 			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
-				}
-				break encodeLoop
-			}
 
 			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
 			off := index0 + e.cur
-			for index0 < s {
+			for index0 < end {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -378,6 +376,7 @@ encodeLoop:
 				off++
 				index0++
 			}
+
 			switch best.rep {
 			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
@@ -386,12 +385,17 @@ encodeLoop:
 			case 4 | 3:
 				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
 			continue
 		}
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		index0 := s + 1
 		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
@@ -419,19 +423,25 @@ encodeLoop:
 		}
 		blk.sequences = append(blk.sequences, seq)
 		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
-		}
 
-		// Index old s + 1 -> s - 1
-		for index0 < s {
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
+		}
+
+		off := index0 + e.cur
+		for index0 < end {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
+			off++
+		}
+		if s >= sLimit {
+			break encodeLoop
 		}
 	}
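
A recurring change in the hunks above replaces loops of the form `for index0 < s` with a bound clamped a few bytes past sLimit (`sLimit+4` in the repeat branch, `sLimit-4` in the non-repeat branch), so hash-table indexing of skipped positions stops near the block limit instead of running all the way to the match end. A standalone sketch of that clamp, with names of my own rather than identifiers from the library:

package main

import "fmt"

// clampIndexEnd mirrors the pattern from the diff: index skipped positions
// only up to a small window past the block's sLimit boundary.
func clampIndexEnd(s, sLimit, slack int) int {
	end := s
	if s > sLimit+slack {
		end = sLimit + slack
	}
	return end
}

func main() {
	fmt.Println(clampIndexEnd(120, 100, 4)) // 104: clamped to sLimit+slack
	fmt.Println(clampIndexEnd(98, 100, 4))  // 98: already inside the window
}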

View File

@@ -145,7 +145,7 @@ encodeLoop:
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -162,6 +162,7 @@ encodeLoop:
 			off := s + e.cur
 			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -258,7 +259,6 @@ encodeLoop:
 				}
 				blk.sequences = append(blk.sequences, seq)
 
-				index0 := s + repOff2
 				s += lenght + repOff2
 				nextEmit = s
 				if s >= sLimit {
@@ -498,15 +498,15 @@ encodeLoop:
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
@@ -672,7 +672,7 @@ encodeLoop:
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -691,6 +691,7 @@ encodeLoop:
 			e.markLongShardDirty(nextHashL)
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
 			e.markShortShardDirty(nextHashS)
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -726,7 +727,6 @@ encodeLoop:
 				blk.sequences = append(blk.sequences, seq)
 
 				// Index match start+1 (long) -> s - 1
-				index0 := s + repOff
 				s += lenght + repOff
 				nextEmit = s
@@ -790,7 +790,6 @@ encodeLoop:
 				}
 				blk.sequences = append(blk.sequences, seq)
 
-				index0 := s + repOff2
 				s += lenght + repOff2
 				nextEmit = s
 				if s >= sLimit {
@@ -1024,18 +1023,18 @@ encodeLoop:
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
 			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)

vendor/modules.txt (vendored, 4 changes)

@@ -14,8 +14,8 @@ github.com/inconshreveable/mousetrap
 # github.com/julienschmidt/httprouter v1.3.0
 ## explicit; go 1.7
 github.com/julienschmidt/httprouter
-# github.com/klauspost/compress v1.17.2
-## explicit; go 1.18
+# github.com/klauspost/compress v1.17.3
+## explicit; go 1.19
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0