s2.go 4.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151
  1. // Copyright 2011 The Snappy-Go Authors. All rights reserved.
  2. // Copyright (c) 2019 Klaus Post. All rights reserved.
  3. // Use of this source code is governed by a BSD-style
  4. // license that can be found in the LICENSE file.

  5. // Package s2 implements the S2 compression format.
  6. //
  7. // S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput,
  8. // which is why it features concurrent compression for bigger payloads.
  9. //
  10. // Decoding is compatible with Snappy compressed content,
  11. // but content compressed with S2 cannot be decompressed by Snappy.
  12. //
  13. // For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
  14. //
  15. // There are actually two S2 formats: block and stream. They are related,
  16. // but different: trying to decompress block-compressed data as a S2 stream
  17. // will fail, and vice versa. The block format is the Decode and Encode
  18. // functions and the stream format is the Reader and Writer types.
  19. //
  20. // A "better" compression option is available. This will trade some compression
  21. // speed for a better compression ratio.
  22. //
  23. // The block format, the more common case, is used when the complete size (the
  24. // number of bytes) of the original data is known upfront, at the time
  25. // compression starts. The stream format, also known as the framing format, is
  26. // for when that isn't always true.
  27. //
  28. // Blocks do not offer much data protection, so it is up to you to
  29. // add data validation of decompressed blocks.
  30. //
  31. // Streams perform CRC validation of the decompressed data.
  32. // Stream compression will also be performed on multiple CPU cores concurrently
  33. // significantly improving throughput.
  34. package s2
  35. import (
  36. "bytes"
  37. "hash/crc32"
  38. "github.com/klauspost/compress/internal/race"
  39. )
  40. /*
  41. Each encoded block begins with the varint-encoded length of the decoded data,
  42. followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
  43. first byte of each chunk is broken into its 2 least and 6 most significant bits
  44. called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
  45. Zero means a literal tag. All other values mean a copy tag.
  46. For literal tags:
  47. - If m < 60, the next 1 + m bytes are literal bytes.
  48. - Otherwise, let n be the little-endian unsigned integer denoted by the next
  49. m - 59 bytes. The next 1 + n bytes after that are literal bytes.
  50. For copy tags, length bytes are copied from offset bytes ago, in the style of
  51. Lempel-Ziv compression algorithms. In particular:
  52. - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
  53. The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
  54. of the offset. The next byte is bits 0-7 of the offset.
  55. - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
  56. The length is 1 + m. The offset is the little-endian unsigned integer
  57. denoted by the next 2 bytes.
  58. - For l == 3, the offset ranges in [0, 1<<32) and the length in
  59. [1, 65). The length is 1 + m. The offset is the little-endian unsigned
  60. integer denoted by the next 4 bytes.
  61. */
// Chunk tags for the block format: the 2 low bits of the first byte of each
// chunk (see the format description above). Zero is a literal, all other
// values are copies distinguished by offset width.
const (
	tagLiteral = 0x00 // literal bytes follow
	tagCopy1   = 0x01 // copy with 11-bit offset, length 4-11
	tagCopy2   = 0x02 // copy with 2-byte little-endian offset
	tagCopy4   = 0x03 // copy with 4-byte little-endian offset
)
const (
	// checksumSize is the size in bytes of the CRC stored before each chunk body.
	checksumSize = 4
	// chunkHeaderSize is the size in bytes of a chunk header
	// (1 type byte plus a 24-bit length; see maxChunkSize below).
	chunkHeaderSize = 4
	// magicChunk is the stream-identifier chunk that starts an S2 stream.
	magicChunk = "\xff\x06\x00\x00" + magicBody
	// magicChunkSnappy is the stream-identifier chunk of a Snappy-compatible stream.
	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
	magicBodySnappy  = "sNaPpY"
	magicBody        = "S2sTwO"

	// maxBlockSize is the maximum size of the input to encodeBlock.
	//
	// For the framing format (Writer type instead of Encode function),
	// this is the maximum uncompressed size of a block.
	maxBlockSize = 4 << 20

	// minBlockSize is the minimum size of block setting when creating a writer.
	minBlockSize = 4 << 10

	// skippableFrameHeader is the header size of a skippable frame.
	skippableFrameHeader = 4
	// maxChunkSize is the largest payload a chunk can carry
	// (the chunk length field is 24 bits).
	maxChunkSize = 1<<24 - 1 // 16777215

	// Default block size
	defaultBlockSize = 1 << 20

	// maxSnappyBlockSize is the maximum snappy block size.
	maxSnappyBlockSize = 1 << 16

	// obufHeaderLen is the number of header bytes (checksum + chunk header)
	// reserved before each encoded block in output buffers.
	obufHeaderLen = checksumSize + chunkHeaderSize
)
// Chunk types of the stream (framing) format.
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	// ChunkTypeIndex is the chunk type used for seek-index chunks.
	// NOTE(review): exported, so presumably referenced by index readers
	// outside this file — confirm before renaming.
	ChunkTypeIndex            = 0x99
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
var (
	// crcTable is the Castagnoli-polynomial table used by crc below.
	crcTable = crc32.MakeTable(crc32.Castagnoli)

	// Pre-built []byte copies of the magic chunks, so writers do not
	// re-convert (and re-allocate) the string constants on every use.
	magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes.
	magicChunkBytes       = []byte(magicChunk)       // Can be passed to functions where it escapes.
)
  102. // crc implements the checksum specified in section 3 of
  103. // https://github.com/google/snappy/blob/master/framing_format.txt
  104. func crc(b []byte) uint32 {
  105. race.ReadSlice(b)
  106. c := crc32.Update(0, crcTable, b)
  107. return c>>15 | c<<17 + 0xa282ead8
  108. }
  109. // literalExtraSize returns the extra size of encoding n literals.
  110. // n should be >= 0 and <= math.MaxUint32.
  111. func literalExtraSize(n int64) int64 {
  112. if n == 0 {
  113. return 0
  114. }
  115. switch {
  116. case n < 60:
  117. return 1
  118. case n < 1<<8:
  119. return 2
  120. case n < 1<<16:
  121. return 3
  122. case n < 1<<24:
  123. return 4
  124. default:
  125. return 5
  126. }
  127. }
  128. type byter interface {
  129. Bytes() []byte
  130. }
  131. var _ byter = &bytes.Buffer{}