// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

const (
	bestLongTableBits = 22                     // Bits used in the long match table
	bestLongTableSize = 1 << bestLongTableBits // Size of the table
	bestLongLen       = 8                      // Bytes used for table hash

	// Note: Increasing the short table bits or making the hash shorter
	// can actually lead to compression degradation since it will 'steal' more from the
	// long match table and match offsets are quite big.
	// This greatly depends on the type of input.
	bestShortTableBits = 18                      // Bits used in the short match table
	bestShortTableSize = 1 << bestShortTableBits // Size of the table
	bestShortLen       = 4                       // Bytes used for table hash
)

type match struct {
	offset int32
	s      int32
	length int32
	rep    int32
	est    int32
	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}

const highScore = 25000 // Sentinel cost: marks a match as not worth encoding.

// estBits will estimate output bits from predefined tables.
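// The resulting estimate is in bits: the FSE costs of the offset and match
// length codes are added, and the entropy-scaled cost of the literals the
// match would replace is subtracted, so lower estimates are better.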
func (m *match) estBits(bitsPerByte int32) {
	mlc := mlCode(uint32(m.length - zstdMinMatch))
	var ofc uint8
	if m.rep < 0 {
		ofc = ofCode(uint32(m.s-m.offset) + 3)
	} else {
		ofc = ofCode(uint32(m.rep))
	}
	// Fetch the predefined cost tables for the offset and match length codes.
	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]

	// Add cost of match encoding...
	m.est = int32(ofTT.outBits + mlTT.outBits)
	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
	// Subtract savings compared to literal encoding...
	m.est -= (m.length * bitsPerByte) >> 10
	if m.est > 0 {
		// Unlikely to gain anything; discard the match.
		m.length = 0
		m.est = highScore
	}
}

// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
// When we find a long match we choose between the two values and select the longest.
// When we find a short match, after checking the long, we check if we can find a long at n+1
// and that it is longer (lazy matching).
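//
// This encoder backs the SpeedBestCompression level of the public API, e.g.:
//
//	w, _ := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))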
type bestFastEncoder struct {
	fastBase
	table         [bestShortTableSize]prevEntry
	longTable     [bestLongTableSize]prevEntry
	dictTable     []prevEntry
	dictLongTable []prevEntry
}

// Encode encodes the block, spending extra effort to find the best matches.
func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (4)
		inputMargin            = 8 + 4
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
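	// Table entries store absolute offsets (position + e.cur), so when e.cur
	// approaches bufferReset all stored offsets are rebased against e.maxMatchOff.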
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			e.table = [bestShortTableSize]prevEntry{}
			e.longTable = [bestLongTableSize]prevEntry{}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			v2 := e.table[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.table[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			v2 := e.longTable[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.longTable[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Use this to estimate literal cost.
	// Scaled by 10 bits.
	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
	// Huffman can never go < 1 bit/byte
	if bitsPerByte < 1024 {
		bitsPerByte = 1024
	}
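	// E.g. input measuring ~4 bits/byte of entropy yields bitsPerByte = 4096.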

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	const kSearchStrength = 10

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])
	offset3 := int32(blk.recentOffsets[2])

	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}

	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
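	// Each iteration finds the best scoring match at s among both hash tables
	// and the three repeat offsets, emits it as a sequence, and indexes the span.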
	for {
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		if debugAsserts && canRepeat && offset1 == 0 {
			panic("offset0 was 0")
		}

		bestOf := func(a, b *match) *match {
			if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
				return a
			}
			return b
		}
		const goodEnough = 100
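		// Matches at least this long skip the extra look-ahead searches below.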

		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
		candidateL := e.longTable[nextHashL]
		candidateS := e.table[nextHashS]

		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
				return match{s: s, est: highScore}
			}
			if debugAsserts {
				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
				}
			}
			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
			m.estBits(bitsPerByte)
			return m
		}

		m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
		m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
		m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
		m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
		best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))

		if canRepeat && best.length < goodEnough {
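			// Try the three repeat offsets at s+1 and, if anything matched, at s+3.
			// Repeats are cheap to encode and can beat the table matches.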
			cv32 := uint32(cv >> 8)
			spp := s + 1
			m1 := matchAt(spp-offset1, spp, cv32, 1)
			m2 := matchAt(spp-offset2, spp, cv32, 2)
			m3 := matchAt(spp-offset3, spp, cv32, 3)
			best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
			if best.length > 0 {
				cv32 = uint32(cv >> 24)
				spp += 2
				m1 := matchAt(spp-offset1, spp, cv32, 1)
				m2 := matchAt(spp-offset2, spp, cv32, 2)
				m3 := matchAt(spp-offset3, spp, cv32, 3)
				best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
			}
		}
		// Update the hash tables; the previous head becomes prev, forming the length-2 chain.
		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}

		// Look far ahead, unless we have a really long match already...
		if best.length < goodEnough {
			// No match found: step forward, accelerating with the distance since the last emit.
			if best.length < 4 {
				s += 1 + (s-nextEmit)>>(kSearchStrength-1)
				if s >= sLimit {
					break encodeLoop
				}
				cv = load6432(src, s)
				continue
			}

			s++
			candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
			cv = load6432(src, s)
			cv2 := load6432(src, s+1)
			candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]

			// Short at s+1
			m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
			// Long at s+1, s+2
			m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
			m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
			m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
			m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
			best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
			if false {
				// Short at s+3.
				// Too often worse...
				m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
				best = bestOf(best, &m)
			}
			// See if we can find a better match by checking where the current best ends.
			// Use that offset to see if we can find a better full match.
			if sAt := best.s + best.length; sAt < sLimit {
				nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
				candidateEnd := e.longTable[nextHashL]
				// Start check at a fixed offset to allow for a few mismatches.
				// For this compression level 2 yields the best results.
				const skipBeginning = 2
				if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
					m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
					bestEnd := bestOf(best, &m)
					if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
						m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
						bestEnd = bestOf(bestEnd, &m)
					}
					best = bestEnd
				}
			}
		}

		if debugAsserts {
			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
			}
		}

		// We have a match, we can store the forward value
		if best.rep > 0 {
			s = best.s
			var seq seq
			seq.matchLen = uint32(best.length - zstdMinMatch)

			// We might be able to match backwards.
			// Extend as long as we can.
			start := best.s
			// We end the search early, so we don't risk 0 literals
			// and have to do special offset treatment.
			startLimit := nextEmit + 1

			tMin := s - e.maxMatchOff
			if tMin < 0 {
				tMin = 0
			}
			repIndex := best.offset
			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
				repIndex--
				start--
				seq.matchLen++
			}
			addLiterals(&seq, start)

			// rep 0
			seq.offset = uint32(best.rep)
			if debugSequences {
				println("repeat sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Remember the match start so the covered positions can be indexed below.
			index0 := s
			s = best.s + best.length

			nextEmit = s
			if s >= sLimit {
				if debugEncoder {
					println("repeat ended", s, best.length)

				}
				break encodeLoop
			}
			// Index the positions covered by the match, in both tables.
			off := index0 + e.cur
			for index0 < s-1 {
				cv0 := load6432(src, index0)
				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
				off++
				index0++
			}
			switch best.rep {
			case 2:
				offset1, offset2 = offset2, offset1
			case 3:
				offset1, offset2, offset3 = offset3, offset1, offset2
			}
			cv = load6432(src, s)
			continue
		}

		// A match of at least 4 bytes has been found. Update recent offsets.
		// It may be extended backwards below.
		s = best.s
		t := best.offset
		offset1, offset2, offset3 = s-t, offset1, offset2

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the n-byte match as long as possible.
		l := best.length

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3 // +3: offset codes 1-3 are reserved for repeat offsets.
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index the positions covered by the match, in both tables.
		index0 := s - l + 1
		for index0 < s-1 {
			cv0 := load6432(src, index0)
			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
			off := index0 + e.cur
			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
			index0++
		}

		cv = load6432(src, s)
		if !canRepeat {
			continue
		}

		// Check offset 2
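		// After a match, greedily emit zero-literal repeat matches while the data
		// at the second recent offset keeps matching.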
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
			nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// litLen is 0, so repeat code 1 selects the second recent offset here.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	blk.recentOffsets[2] = uint32(offset3)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}

// EncodeNoHist will encode a block with no history and no following blocks.
// The most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	e.ensureHist(len(src))
	e.Encode(blk, src)
}

// Reset resets the encoder and loads the dictionary d if it is not nil.
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d == nil {
		return
	}
	// Init or copy dict table
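	// The dictionary content is indexed in steps of 4 bytes, hashing each of the
	// four byte positions of the loaded word into the short table.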
	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
		if len(e.dictTable) != len(e.table) {
			e.dictTable = make([]prevEntry, len(e.table))
		}
		end := int32(len(d.content)) - 8 + e.maxMatchOff
		for i := e.maxMatchOff; i < end; i += 4 {
			const hashLog = bestShortTableBits

			cv := load6432(d.content, i-e.maxMatchOff)
			nextHash := hashLen(cv, hashLog, bestShortLen)      // 0 -> 4
			nextHash1 := hashLen(cv>>8, hashLog, bestShortLen)  // 1 -> 5
			nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
			nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
			e.dictTable[nextHash] = prevEntry{
				prev:   e.dictTable[nextHash].offset,
				offset: i,
			}
			e.dictTable[nextHash1] = prevEntry{
				prev:   e.dictTable[nextHash1].offset,
				offset: i + 1,
			}
			e.dictTable[nextHash2] = prevEntry{
				prev:   e.dictTable[nextHash2].offset,
				offset: i + 2,
			}
			e.dictTable[nextHash3] = prevEntry{
				prev:   e.dictTable[nextHash3].offset,
				offset: i + 3,
			}
		}
		e.lastDictID = d.id
	}

	// Init or copy dict long table
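	// The long table is filled at every byte position using a rolling 8-byte window.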
	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
		if len(e.dictLongTable) != len(e.longTable) {
			e.dictLongTable = make([]prevEntry, len(e.longTable))
		}
		if len(d.content) >= 8 {
			cv := load6432(d.content, 0)
			h := hashLen(cv, bestLongTableBits, bestLongLen)
			e.dictLongTable[h] = prevEntry{
				offset: e.maxMatchOff,
				prev:   e.dictLongTable[h].offset,
			}

			end := int32(len(d.content)) - 8 + e.maxMatchOff
			off := 8 // First to read
			for i := e.maxMatchOff + 1; i < end; i++ {
				cv = cv>>8 | (uint64(d.content[off]) << 56)
				h := hashLen(cv, bestLongTableBits, bestLongLen)
				e.dictLongTable[h] = prevEntry{
					offset: i,
					prev:   e.dictLongTable[h].offset,
				}
				off++
			}
		}
		e.lastDictID = d.id
	}
	// Reset table to initial state
	copy(e.longTable[:], e.dictLongTable)

	e.cur = e.maxMatchOff
	// Reset table to initial state
	copy(e.table[:], e.dictTable)
}