fast_encoder.go 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232
  1. // Copyright 2011 The Snappy-Go Authors. All rights reserved.
  2. // Modified for deflate by Klaus Post (c) 2015.
  3. // Use of this source code is governed by a BSD-style
  4. // license that can be found in the LICENSE file.
  5. package flate
  6. import (
  7. "fmt"
  8. "math/bits"
  9. "github.com/klauspost/compress/internal/le"
  10. )
// fastEnc is the interface implemented by the fast (level 1-6) encoders.
// Encode appends the tokens for src to dst.
// Reset prepares the encoder for an unrelated stream.
type fastEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}
  15. func newFastEnc(level int) fastEnc {
  16. switch level {
  17. case 1:
  18. return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
  19. case 2:
  20. return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
  21. case 3:
  22. return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
  23. case 4:
  24. return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
  25. case 5:
  26. return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
  27. case 6:
  28. return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
  29. default:
  30. panic("invalid level specified")
  31. }
  32. }
const (
	tableBits  = 15             // Bits used in the table
	tableSize  = 1 << tableBits // Size of the table
	tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.

	baseMatchOffset = 1       // The smallest match offset
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15 // The largest match offset

	bTableBits = 17              // Bits used in the big tables
	bTableSize = 1 << bTableBits // Size of the table

	allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.

	// bufferReset is the offset at which cur is reset, chosen so that
	// cur + a full history + one store block never overflows int32.
	bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1
)
// Multiplicative constants used by the hash functions below
// (hash7, hashLen). primeNbytes is used when hashing the low
// N bytes of the input word.
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
// load3232 loads 4 bytes from b starting at offset i,
// decoded little-endian, taking an int32 offset.
func load3232(b []byte, i int32) uint32 {
	return le.Load32(b, i)
}
// load6432 loads 8 bytes from b starting at offset i,
// decoded little-endian, taking an int32 offset.
func load6432(b []byte, i int32) uint64 {
	return le.Load64(b, i)
}
// tableEntry is a single hash-table slot, storing the offset of a
// previously seen position.
type tableEntry struct {
	offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // history of previously seen bytes; matches refer back into this
	cur  int32  // offset added to positions in hist; advanced by addBlock/Reset
}
// addBlock appends src to the history buffer and returns the index in
// e.hist where src now begins.
//
// If appending would exceed capacity, the newest maxMatchOffset bytes
// are slid down to the front of the buffer and e.cur is advanced by the
// number of bytes discarded, keeping previously recorded offsets
// consistent.
func (e *fastGen) addBlock(src []byte) int32 {
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.hist = make([]byte, 0, allocHistory)
		} else {
			// The slide below reads e.hist[offset : offset+maxMatchOffset],
			// which requires at least 2*maxMatchOffset of capacity.
			if cap(e.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down
			offset := int32(len(e.hist)) - maxMatchOffset
			// Fixed-size array copy; equivalent to
			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			// but with the length known at compile time.
			*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
			// Advance cur so offsets recorded before the slide still
			// resolve to the same bytes.
			e.cur += offset
			e.hist = e.hist[:maxMatchOffset]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
// tableEntryPrev is a hash-table slot holding the current entry and the
// previous one it replaced.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
  94. // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
  95. // Preferably h should be a constant and should always be <64.
  96. func hash7(u uint64, h uint8) uint32 {
  97. return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
  98. }
  99. // hashLen returns a hash of the lowest mls bytes of with length output bits.
  100. // mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
  101. // length should always be < 32.
  102. // Preferably length and mls should be a constant for inlining.
  103. func hashLen(u uint64, length, mls uint8) uint32 {
  104. switch mls {
  105. case 3:
  106. return (uint32(u<<8) * prime3bytes) >> (32 - length)
  107. case 5:
  108. return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
  109. case 6:
  110. return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
  111. case 7:
  112. return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
  113. case 8:
  114. return uint32((u * prime8bytes) >> (64 - length))
  115. default:
  116. return (uint32(u) * prime4bytes) >> (32 - length)
  117. }
  118. }
  119. // matchlen will return the match length between offsets and t in src.
  120. // The maximum length returned is maxMatchLength - 4.
  121. // It is assumed that s > t, that t >=0 and s < len(src).
  122. func (e *fastGen) matchlen(s, t int, src []byte) int32 {
  123. if debugDeflate {
  124. if t >= s {
  125. panic(fmt.Sprint("t >=s:", t, s))
  126. }
  127. if int(s) >= len(src) {
  128. panic(fmt.Sprint("s >= len(src):", s, len(src)))
  129. }
  130. if t < 0 {
  131. panic(fmt.Sprint("t < 0:", t))
  132. }
  133. if s-t > maxMatchOffset {
  134. panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
  135. }
  136. }
  137. s1 := min(s+maxMatchLength-4, len(src))
  138. left := s1 - s
  139. n := int32(0)
  140. for left >= 8 {
  141. diff := le.Load64(src, s) ^ le.Load64(src, t)
  142. if diff != 0 {
  143. return n + int32(bits.TrailingZeros64(diff)>>3)
  144. }
  145. s += 8
  146. t += 8
  147. n += 8
  148. left -= 8
  149. }
  150. a := src[s:s1]
  151. b := src[t:]
  152. for i := range a {
  153. if a[i] != b[i] {
  154. break
  155. }
  156. n++
  157. }
  158. return n
  159. }
  160. // matchlenLong will return the match length between offsets and t in src.
  161. // It is assumed that s > t, that t >=0 and s < len(src).
  162. func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
  163. if debugDeflate {
  164. if t >= s {
  165. panic(fmt.Sprint("t >=s:", t, s))
  166. }
  167. if int(s) >= len(src) {
  168. panic(fmt.Sprint("s >= len(src):", s, len(src)))
  169. }
  170. if t < 0 {
  171. panic(fmt.Sprint("t < 0:", t))
  172. }
  173. if s-t > maxMatchOffset {
  174. panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
  175. }
  176. }
  177. // Extend the match to be as long as possible.
  178. left := len(src) - s
  179. n := int32(0)
  180. for left >= 8 {
  181. diff := le.Load64(src, s) ^ le.Load64(src, t)
  182. if diff != 0 {
  183. return n + int32(bits.TrailingZeros64(diff)>>3)
  184. }
  185. s += 8
  186. t += 8
  187. n += 8
  188. left -= 8
  189. }
  190. a := src[s:]
  191. b := src[t:]
  192. for i := range a {
  193. if a[i] != b[i] {
  194. break
  195. }
  196. n++
  197. }
  198. return n
  199. }
  200. // Reset the encoding table.
  201. func (e *fastGen) Reset() {
  202. if cap(e.hist) < allocHistory {
  203. e.hist = make([]byte, 0, allocHistory)
  204. }
  205. // We offset current position so everything will be out of reach.
  206. // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
  207. if e.cur <= bufferReset {
  208. e.cur += maxMatchOffset + int32(len(e.hist))
  209. }
  210. e.hist = e.hist[:0]
  211. }