huffman_bit_writer.go 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185
  1. // Copyright 2009 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package flate
  5. import (
  6. "encoding/binary"
  7. "fmt"
  8. "io"
  9. "math"
  10. )
const (
	// offsetCodeCount is the number of offset (distance) codes
	// defined by DEFLATE; the largest offset code is 29.
	offsetCodeCount = 30
	// endBlockMarker is the special code (256) used to mark the
	// end of a block.
	endBlockMarker = 256
	// lengthCodesStart is the first length code (257).
	lengthCodesStart = 257
	// codegenCodeCount is the number of codegen codes used to
	// transmit the Huffman code lengths themselves.
	codegenCodeCount = 19
	// badCode marks the end of the codegen array; it is not a
	// valid code length.
	badCode = 255
	// maxPredefinedTokens is the maximum number of tokens
	// where we check if fixed size is smaller.
	maxPredefinedTokens = 250
	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 246
	// bufferSize is the actual output byte buffer size.
	// It must have additional headroom for a flush
	// which can contain up to 8 bytes.
	bufferSize = bufferFlushSize + 8
)
// lengthExtraBitsMinCode is the minimum length code that emits extra bits.
const lengthExtraBitsMinCode = 8

// lengthExtraBits is the number of extra bits needed by
// length code X - lengthCodesStart.
var lengthExtraBits = [32]uint8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}
// lengthBase is the length indicated by length code
// X - lengthCodesStart.
// NOTE(review): values appear to be stored relative to the minimum
// match length so they fit in uint8 — confirm against lengthCode().
var lengthBase = [32]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}
// offsetExtraBitsMinCode is the minimum offset code that emits bits.
const offsetExtraBitsMinCode = 4

// offsetExtraBits is the number of extra bits carried by each
// offset code word.
var offsetExtraBits = [32]int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14,
}
// offsetCombined packs, for each offset code, the extra-bit count
// (low 8 bits) together with the base offset (shifted left by 8),
// so writeTokens can fetch both with a single table load.
// Entries with no extra bits, and extended-window entries, stay zero.
var offsetCombined = [32]uint32{}

func init() {
	// offsetBase is the smallest offset encoded by each offset code.
	var offsetBase = [32]uint32{
		/* normal deflate */
		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
		/* extended window */
		0x008000, 0x00c000,
	}

	for i := range offsetCombined[:] {
		// Don't use extended window values...
		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
			continue
		}
		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
	}
}
// codegenOrder is the odd order in which the codegen code sizes
// are written, allowing trailing zero lengths to be trimmed.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
// huffmanBitWriter encodes tokens into a DEFLATE bit stream,
// accumulating bits and bytes before handing them to the writer.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits   uint64
	nbits  uint8
	nbytes uint8
	// lastHuffMan is set when the current table is a literal-only
	// (huffman) table whose offset codes cannot be reused.
	lastHuffMan     bool
	literalEncoding *huffmanEncoder
	tmpLitEncoding  *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	// err is the first Write error; once set, all output is dropped.
	err error
	// lastHeader, when > 0, records the size of the last written
	// table header and signals that the table may be reused and
	// that an EOB code is still owed.
	lastHeader int
	// Set between 0 (reused block can be up to 2x the size)
	// and higher values for a smaller allowed overshoot;
	// newSize >> logNewTablePenalty is added as the penalty.
	logNewTablePenalty uint
	// bytes buffers output; sized with headroom beyond
	// bufferFlushSize for the 8-byte over-writes in writeOutBits.
	bytes       [256 + 8]byte
	literalFreq [lengthCodesStart + 32]uint16
	offsetFreq  [32]uint16
	codegenFreq [codegenCodeCount]uint16

	// codegen must have an extra space for the final symbol.
	codegen [literalCount + offsetCodeCount + 1]uint8
}
  108. // Huffman reuse.
  109. //
  110. // The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
  111. //
  112. // This is controlled by several variables:
  113. //
  114. // If lastHeader is non-zero the Huffman table can be reused.
  115. // This also indicates that a Huffman table has been generated that can output all
  116. // possible symbols.
// It also indicates that an EOB has not yet been emitted, so if a new table is generated
  118. // an EOB with the previous table must be written.
  119. //
  120. // If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
  121. //
  122. // An incoming block estimates the output size of a new table using a 'fresh' by calculating the
  123. // optimal size and adding a penalty in 'logNewTablePenalty'.
  124. // A Huffman table is not optimal, which is why we add a penalty, and generating a new table
  125. // is slower both for compression and decompression.
  126. func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
  127. return &huffmanBitWriter{
  128. writer: w,
  129. literalEncoding: newHuffmanEncoder(literalCount),
  130. tmpLitEncoding: newHuffmanEncoder(literalCount),
  131. codegenEncoding: newHuffmanEncoder(codegenCodeCount),
  132. offsetEncoding: newHuffmanEncoder(offsetCodeCount),
  133. }
  134. }
  135. func (w *huffmanBitWriter) reset(writer io.Writer) {
  136. w.writer = writer
  137. w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
  138. w.lastHeader = 0
  139. w.lastHuffMan = false
  140. }
  141. func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
  142. a := t.offHist[:offsetCodeCount]
  143. b := w.offsetEncoding.codes
  144. b = b[:len(a)]
  145. for i, v := range a {
  146. if v != 0 && b[i].zero() {
  147. return false
  148. }
  149. }
  150. a = t.extraHist[:literalCount-256]
  151. b = w.literalEncoding.codes[256:literalCount]
  152. b = b[:len(a)]
  153. for i, v := range a {
  154. if v != 0 && b[i].zero() {
  155. return false
  156. }
  157. }
  158. a = t.litHist[:256]
  159. b = w.literalEncoding.codes[:len(a)]
  160. for i, v := range a {
  161. if v != 0 && b[i].zero() {
  162. return false
  163. }
  164. }
  165. return true
  166. }
// flush drains all pending bits and bytes to the underlying writer,
// emitting the end-of-block code first if one is still owed.
// Trailing partial bits are zero-padded to a byte boundary.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	n := w.nbytes
	// Move remaining accumulator bits into the byte buffer,
	// one byte at a time (the last byte may be partial).
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
  192. func (w *huffmanBitWriter) write(b []byte) {
  193. if w.err != nil {
  194. return
  195. }
  196. _, w.err = w.writer.Write(b)
  197. }
// writeBits appends the low nb bits of b to the bit accumulator,
// flushing to the byte buffer once 48 or more bits are pending.
// b must not have bits set at or above position nb.
func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
	// "& 63" bounds the shift so the compiler can elide checks.
	w.bits |= uint64(b) << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeBytes writes the byte slice directly to the output, first
// draining any whole bytes left in the bit accumulator.
// The bit stream must be byte-aligned when this is called.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	// Pending bits are a whole number of bytes; move them out.
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker.
//
//	numLiterals    The number of literals in literalEncoding
//	numOffsets     The number of offsets in offsetEncoding
//	litEnc, offEnc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen[:] // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = litEnc.codes[i].len()
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = offEnc.codes[i].len()
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			// Emit one literal length, then compress remaining runs of
			// the same nonzero length with code 16 (repeat 3-6 times).
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Runs of zeros use code 18 (11-138 zeros), then code 17
			// (3-10 zeros) for the remainder.
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		// Any leftover (at most 2) copies are emitted literally.
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}
  322. func (w *huffmanBitWriter) codegens() int {
  323. numCodegens := len(w.codegenFreq)
  324. for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
  325. numCodegens--
  326. }
  327. return numCodegens
  328. }
// headerSize returns the size of the dynamic block header in bits,
// together with the number of codegen codes to transmit.
// NOTE(review): the trimming loop duplicates codegens(); consider
// calling it to keep the two in sync.
func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	// 3 bits block header, 5+5+4 bits for the count fields, 3 bits
	// per codegen length, plus the encoded lengths with their extra
	// bits (2 for code 16, 3 for 17, 7 for 18).
	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7, numCodegens
}
  340. // dynamicSize returns the size of dynamically encoded data in bits.
  341. func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
  342. size = litEnc.bitLength(w.literalFreq[:]) +
  343. offEnc.bitLength(w.offsetFreq[:])
  344. return size
  345. }
// dynamicSize returns the size of dynamically encoded data in bits,
// including the block header, along with the codegen count needed
// to write that header.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	header, numCodegens := w.headerSize()
	size = header +
		litEnc.bitLength(w.literalFreq[:]) +
		offEnc.bitLength(w.offsetFreq[:]) +
		extraBits
	return size, numCodegens
}
  355. // extraBitSize will return the number of bits that will be written
  356. // as "extra" bits on matches.
  357. func (w *huffmanBitWriter) extraBitSize() int {
  358. total := 0
  359. for i, n := range w.literalFreq[257:literalCount] {
  360. total += int(n) * int(lengthExtraBits[i&31])
  361. }
  362. for i, n := range w.offsetFreq[:offsetCodeCount] {
  363. total += int(n) * int(offsetExtraBits[i&31])
  364. }
  365. return total
  366. }
// fixedSize returns the size of the data in bits when encoded with
// the predefined fixed Huffman tables, including the 3-bit header.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
		extraBits
}
  374. // storedSize calculates the stored size, including header.
  375. // The function returns the size in bits and whether the block
  376. // fits inside a single block.
  377. func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
  378. if in == nil {
  379. return 0, false
  380. }
  381. if len(in) <= maxStoreBlockSize {
  382. return (len(in) + 5) * 8, true
  383. }
  384. return 0, false
  385. }
// writeCode appends the Huffman code c to the bit accumulator,
// flushing to the byte buffer once at least 48 bits are pending.
func (w *huffmanBitWriter) writeCode(c hcode) {
	// The function does not get inlined if we "& 63" the shift.
	w.bits |= c.code64() << (w.nbits & 63)
	w.nbits += c.len()
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeOutBits will write bits to the buffer.
// It moves 48 bits (6 bytes) from the bit accumulator into the byte
// buffer, flushing to the writer when the buffer is nearly full.
func (w *huffmanBitWriter) writeOutBits() {
	bits := w.bits
	w.bits >>= 48
	w.nbits -= 48
	n := w.nbytes

	// We over-write, but faster...
	// PutUint64 stores 8 bytes but only 6 are kept; bytes has
	// headroom beyond bufferFlushSize, so this cannot overflow.
	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
	n += 6

	if n >= bufferFlushSize {
		if w.err != nil {
			// A sticky error means output is being discarded.
			n = 0
			return
		}
		w.write(w.bytes[:n])
		n = 0
	}

	w.nbytes = n
}
// writeDynamicHeader writes the header of a dynamic Huffman block to
// the output stream.
//
//	numLiterals The number of literals specified in codegen
//	numOffsets  The number of offsets specified in codegen
//	numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// 3-bit block header: value 4 = dynamic block, 5 = dynamic + final.
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	// HLIT (5 bits), HDIST (5 bits), HCLEN (4 bits) count fields.
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	// 3-bit lengths of the codegen codes, in codegenOrder.
	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
		w.writeBits(int32(value), 3)
	}

	// The run-length encoded code lengths; repeat codes 16/17/18
	// carry 2, 3 and 7 extra bits respectively.
	i := 0
	for {
		var codeWord = uint32(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[codeWord])

		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}
// writeStoredHeader will write a stored header.
// If the stored block is only used for EOF,
// it is replaced with a fixed huffman block.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
	if length == 0 && isEof {
		w.writeFixedHeader(isEof)
		// EOB: 7 bits, value: 0
		w.writeBits(0, 7)
		w.flush()
		return
	}

	// 3-bit header: stored block, with the final-block flag in bit 0.
	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	// Stored blocks are byte-aligned: flush pads to a byte boundary,
	// then LEN and its one's complement follow.
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}
  484. func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
  485. if w.err != nil {
  486. return
  487. }
  488. if w.lastHeader > 0 {
  489. // We owe an EOB
  490. w.writeCode(w.literalEncoding.codes[endBlockMarker])
  491. w.lastHeader = 0
  492. }
  493. // Indicate that we are a fixed Huffman block
  494. var value int32 = 2
  495. if isEof {
  496. value = 3
  497. }
  498. w.writeBits(value, 3)
  499. }
// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens.AddEOB()
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	numLiterals, numOffsets := w.indexTokens(tokens, false)
	w.generate()
	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		// Extra bits only matter when comparing against a stored block.
		extraBits = w.extraBitSize()
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = math.MaxInt32
	if tokens.n < maxPredefinedTokens {
		size = w.fixedSize(extraBits)
	}

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize <= size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	sync = sync || eof
	if sync {
		tokens.AddEOB()
	}

	// We cannot reuse pure huffman table, and must mark as EOF.
	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
		// We will not try to reuse.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}

	// fillReuse enables filling of empty values.
	// This will make encodings always reusable without testing.
	// However, this does not appear to benefit on most cases.
	const fillReuse = false

	// Check if we can reuse...
	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
	extraBits := 0
	ssize, storable := w.storedSize(input)

	const usePrefs = true
	if storable || w.lastHeader > 0 {
		extraBits = w.extraBitSize()
	}

	var size int

	// Check if we should reuse.
	if w.lastHeader > 0 {
		// Estimate size for using a new table.
		// Use the previous header size as the best estimate.
		newSize := w.lastHeader + tokens.EstimatedBits()
		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty

		// The estimated size is calculated as an optimal table.
		// We add a penalty to make it more realistic and re-use a bit more.
		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits

		// Check if a new table is better.
		if newSize < reuseSize {
			// Write the EOB we owe.
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			size = newSize
			w.lastHeader = 0
		} else {
			size = reuseSize
		}

		// With few tokens, a predefined (fixed) table may beat both;
		// +7 covers the EOB of the fixed block.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
				// Check if we get a reasonable size decrease.
				if storable && ssize <= size {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}
		// Check if we get a reasonable size decrease.
		if storable && ssize <= size {
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}

	// We want a new block/table
	if w.lastHeader == 0 {
		if fillReuse && !sync {
			w.fillTokens()
			numLiterals, numOffsets = maxNumLit, maxNumDist
		} else {
			// Ensure the EOB symbol gets a code.
			w.literalFreq[endBlockMarker] = 1
		}

		w.generate()
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)

		var numCodegens int
		if fillReuse && !sync {
			// Reindex for accurate size...
			w.indexTokens(tokens, true)
		}
		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

		// Store predefined, if we don't get a reasonable improvement.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
				// Store bytes, if we don't get an improvement.
				if storable && ssize <= preSize {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}

		if storable && ssize <= size {
			// Store bytes, if we don't get an improvement.
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}

		// Write Huffman table.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		if !sync {
			// Remember the header size so the table can be reused.
			w.lastHeader, _ = w.headerSize()
		}
		w.lastHuffMan = false
	}

	if sync {
		w.lastHeader = 0
	}
	// Write the tokens.
	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
  690. func (w *huffmanBitWriter) fillTokens() {
  691. for i, v := range w.literalFreq[:literalCount] {
  692. if v == 0 {
  693. w.literalFreq[i] = 1
  694. }
  695. }
  696. for i, v := range w.offsetFreq[:offsetCodeCount] {
  697. if v == 0 {
  698. w.offsetFreq[i] = 1
  699. }
  700. }
  701. }
// indexTokens copies the histograms of t into literalFreq and
// offsetFreq, and returns the number of literal and offset codes
// that must be transmitted (trailing unused codes are trimmed).
// Encodings themselves are built separately by generate().
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
	copy(w.literalFreq[:], t.litHist[:])
	copy(w.literalFreq[256:], t.extraHist[:])
	copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])

	if t.n == 0 {
		return
	}
	if filled {
		// Caller guarantees all symbols are present (see fillTokens).
		return maxNumLit, maxNumDist
	}
	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	return
}
// generate builds the literal and offset Huffman encodings from the
// frequencies collected by indexTokens, limited to 15-bit codes.
func (w *huffmanBitWriter) generate() {
	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
// The hot loop hand-inlines writeCode/writeBits and keeps the bit
// accumulator in locals; state is written back to w at the end.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	if len(tokens) == 0 {
		return
	}

	// Only last token should be endBlockMarker.
	var deferEOB bool
	if tokens[len(tokens)-1] == endBlockMarker {
		tokens = tokens[:len(tokens)-1]
		deferEOB = true
	}

	// Create slices up to the next power of two to avoid bounds checks.
	lits := leCodes[:256]
	offs := oeCodes[:32]
	lengths := leCodes[lengthCodesStart:]
	lengths = lengths[:32]

	// Go 1.16 LOVES having these on stack.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

	for _, t := range tokens {
		if t < 256 {
			// Plain literal byte.
			//w.writeCode(lits[t.literal()])
			c := lits[t]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
			continue
		}

		// Write the length
		length := t.length()
		lengthCode := lengthCode(length) & 31
		if false {
			w.writeCode(lengths[lengthCode])
		} else {
			// inlined
			c := lengths[lengthCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		if lengthCode >= lengthExtraBitsMinCode {
			extraLengthBits := lengthExtraBits[lengthCode]
			//w.writeBits(extraLength, extraLengthBits)
			extraLength := int32(length - lengthBase[lengthCode])
			bits |= uint64(extraLength) << (nbits & 63)
			nbits += extraLengthBits
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		// Write the offset
		offset := t.offset()
		offsetCode := (offset >> 16) & 31
		if false {
			w.writeCode(offs[offsetCode])
		} else {
			// inlined
			c := offs[offsetCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		if offsetCode >= offsetExtraBitsMinCode {
			// offsetCombined packs extra-bit count (low byte) and
			// base offset (high bits) in one table entry.
			offsetComb := offsetCombined[offsetCode]
			//w.writeBits(extraOffset, extraOffsetBits)
			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
			nbits += uint8(offsetComb)
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
	if deferEOB {
		w.writeCode(leCodes[endBlockMarker])
	}
}
  885. // huffOffset is a static offset encoder used for huffman only encoding.
  886. // It can be reused since we will not be encoding offset values.
  887. var huffOffset *huffmanEncoder
  888. func init() {
  889. w := newHuffmanBitWriter(nil)
  890. w.offsetFreq[0] = 1
  891. huffOffset = newHuffmanEncoder(offsetCodeCount)
  892. huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
  893. }
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
//
// eof marks this as the final block; sync forces an end-of-block code so the
// output can be flushed. Any error is stored in w.err and the method returns
// early on subsequent calls.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq[:] {
		w.literalFreq[i] = 0
	}
	// Offset frequencies only need clearing if the previous block was not
	// also huffman-only (lastHuffMan implies they are already zeroed).
	if !w.lastHuffMan {
		for i := range w.offsetFreq[:] {
			w.offsetFreq[i] = 0
		}
	}

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	// Add everything as literals
	// We have to estimate the header size.
	// Assume header is around 70 bytes:
	// https://stackoverflow.com/a/25454430
	const guessHeaderSizeBits = 70 * 8
	histogram(input, w.literalFreq[:numLiterals])
	ssize, storable := w.storedSize(input)
	if storable && len(input) > 1024 {
		// Quick check for incompressible content.
		// Sum of squared deviations from a flat byte distribution; if the
		// histogram stays close to uniform (abs < max), Huffman coding will
		// gain almost nothing, so emit a stored block instead.
		abs := float64(0)
		avg := float64(len(input)) / 256
		max := float64(len(input) * 2)
		for _, v := range w.literalFreq[:256] {
			diff := float64(v) - avg
			abs += diff * diff
			if abs > max {
				break
			}
		}
		if abs < max {
			if debugDeflate {
				fmt.Println("stored", abs, "<", max)
			}
			// No chance we can compress this...
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}
	// The end-of-block marker always occurs exactly once.
	w.literalFreq[endBlockMarker] = 1
	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
	if estBits < math.MaxInt32 {
		// Charge the cost of emitting a new header (estimated when none has
		// been written yet) plus a penalty that biases toward reusing tables.
		estBits += w.lastHeader
		if w.lastHeader == 0 {
			estBits += guessHeaderSizeBits
		}
		estBits += estBits >> w.logNewTablePenalty
	}

	// Store bytes, if we don't get a reasonable improvement.
	if storable && ssize <= estBits {
		if debugDeflate {
			fmt.Println("stored,", ssize, "<=", estBits)
		}
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	if w.lastHeader > 0 {
		// A previous table is still open; compare the cost of reusing it
		// against the freshly generated encoding.
		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])

		if estBits < reuseSize {
			if debugDeflate {
				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
			}
			// We owe an EOB
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			w.lastHeader = 0
		} else if debugDeflate {
			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
		}
	}

	// count tracks bits written, for debug accounting only.
	count := 0
	if w.lastHeader == 0 {
		// Use the temp encoding, so swap.
		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)
		numCodegens := w.codegens()

		// Huffman.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		w.lastHuffMan = true
		w.lastHeader, _ = w.headerSize()
		if debugDeflate {
			count += w.lastHeader
			fmt.Println("header:", count/8)
		}
	}

	encoding := w.literalEncoding.codes[:256]

	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
	// bits holds pending output bits (low nbits are valid); nbytes is the
	// fill level of w.bytes. They are restored to the writer after the loops.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

	if debugDeflate {
		count -= int(nbytes)*8 + int(nbits)
	}

	// Unroll, write 3 codes/loop.
	// Fastest number of unrolls.
	for len(input) > 3 {
		// We must have at least 48 bits free.
		if nbits >= 8 {
			// Drain whole bytes from the bit buffer. PutUint64 writes 8
			// bytes, but only n are consumed; the rest is overwritten on the
			// next store.
			n := nbits >> 3
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			bits >>= (n * 8) & 63
			nbits -= n * 8
			nbytes += n
		}
		if nbytes >= bufferFlushSize {
			if w.err != nil {
				nbytes = 0
				return
			}
			if debugDeflate {
				count += int(nbytes) * 8
			}
			_, w.err = w.writer.Write(w.bytes[:nbytes])
			nbytes = 0
		}
		// Pack three literal codes into the bit buffer. Max code length is
		// 15 bits, so 3 codes fit in the >=48 free bits guaranteed above.
		a, b := encoding[input[0]], encoding[input[1]]
		bits |= a.code64() << (nbits & 63)
		bits |= b.code64() << ((nbits + a.len()) & 63)
		c := encoding[input[2]]
		nbits += b.len() + a.len()
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		input = input[3:]
	}

	// Remaining...
	for _, t := range input {
		if nbits >= 48 {
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
			// Only 6 of the 8 stored bytes are consumed; the top 16 bits
			// stay in the buffer.
			bits >>= 48
			nbits -= 48
			nbytes += 6
			if nbytes >= bufferFlushSize {
				if w.err != nil {
					nbytes = 0
					return
				}
				if debugDeflate {
					count += int(nbytes) * 8
				}
				_, w.err = w.writer.Write(w.bytes[:nbytes])
				nbytes = 0
			}
		}
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		if debugDeflate {
			count += int(c.len())
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if debugDeflate {
		nb := count + int(nbytes)*8 + int(nbits)
		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
	}
	// Flush if needed to have space.
	if w.nbits >= 48 {
		w.writeOutBits()
	}

	if eof || sync {
		// Terminate the block and force a fresh header next time.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}
}