builder.go 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599
/*
 * Copyright 2017 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  16. package table
  17. import (
  18. "crypto/aes"
  19. "math"
  20. "runtime"
  21. "sync"
  22. "sync/atomic"
  23. "unsafe"
  24. fbs "github.com/google/flatbuffers/go"
  25. "github.com/klauspost/compress/s2"
  26. "github.com/pkg/errors"
  27. "google.golang.org/protobuf/proto"
  28. "github.com/dgraph-io/badger/v4/fb"
  29. "github.com/dgraph-io/badger/v4/options"
  30. "github.com/dgraph-io/badger/v4/pb"
  31. "github.com/dgraph-io/badger/v4/y"
  32. "github.com/dgraph-io/ristretto/v2/z"
  33. )
  34. const (
  35. KB = 1024
  36. MB = KB * 1024
  37. // When a block is encrypted, it's length increases. We add 256 bytes of padding to
  38. // handle cases when block size increases. This is an approximate number.
  39. padding = 256
  40. )
  41. type header struct {
  42. overlap uint16 // Overlap with base key.
  43. diff uint16 // Length of the diff.
  44. }
  45. const headerSize = uint16(unsafe.Sizeof(header{}))
  46. // Encode encodes the header.
  47. func (h header) Encode() []byte {
  48. var b [4]byte
  49. *(*header)(unsafe.Pointer(&b[0])) = h
  50. return b[:]
  51. }
  52. // Decode decodes the header.
  53. func (h *header) Decode(buf []byte) {
  54. // Copy over data from buf into h. Using *h=unsafe.pointer(...) leads to
  55. // pointer alignment issues. See https://github.com/dgraph-io/badger/issues/1096
  56. // and comment https://github.com/dgraph-io/badger/pull/1097#pullrequestreview-307361714
  57. copy(((*[headerSize]byte)(unsafe.Pointer(h))[:]), buf[:headerSize])
  58. }
  59. // bblock represents a block that is being compressed/encrypted in the background.
  60. type bblock struct {
  61. data []byte
  62. baseKey []byte // Base key for the current block.
  63. entryOffsets []uint32 // Offsets of entries present in current block.
  64. end int // Points to the end offset of the block.
  65. }
  66. // Builder is used in building a table.
  67. type Builder struct {
  68. // Typically tens or hundreds of meg. This is for one single file.
  69. alloc *z.Allocator
  70. curBlock *bblock
  71. compressedSize atomic.Uint32
  72. uncompressedSize atomic.Uint32
  73. lenOffsets uint32
  74. keyHashes []uint32 // Used for building the bloomfilter.
  75. opts *Options
  76. maxVersion uint64
  77. onDiskSize uint32
  78. staleDataSize int
  79. // Used to concurrently compress/encrypt blocks.
  80. wg sync.WaitGroup
  81. blockChan chan *bblock
  82. blockList []*bblock
  83. }
  84. func (b *Builder) allocate(need int) []byte {
  85. bb := b.curBlock
  86. if len(bb.data[bb.end:]) < need {
  87. // We need to reallocate. 1GB is the max size that the allocator can allocate.
  88. // While reallocating, if doubling exceeds that limit, then put the upper bound on it.
  89. sz := 2 * len(bb.data)
  90. if sz > (1 << 30) {
  91. sz = 1 << 30
  92. }
  93. if bb.end+need > sz {
  94. sz = bb.end + need
  95. }
  96. tmp := b.alloc.Allocate(sz)
  97. copy(tmp, bb.data)
  98. bb.data = tmp
  99. }
  100. bb.end += need
  101. return bb.data[bb.end-need : bb.end]
  102. }
  103. // append appends to curBlock.data
  104. func (b *Builder) append(data []byte) {
  105. dst := b.allocate(len(data))
  106. y.AssertTrue(len(data) == copy(dst, data))
  107. }
  108. const maxAllocatorInitialSz = 256 << 20
  109. // NewTableBuilder makes a new TableBuilder.
  110. func NewTableBuilder(opts Options) *Builder {
  111. sz := 2 * int(opts.TableSize)
  112. if sz > maxAllocatorInitialSz {
  113. sz = maxAllocatorInitialSz
  114. }
  115. b := &Builder{
  116. alloc: opts.AllocPool.Get(sz, "TableBuilder"),
  117. opts: &opts,
  118. }
  119. b.alloc.Tag = "Builder"
  120. b.curBlock = &bblock{
  121. data: b.alloc.Allocate(opts.BlockSize + padding),
  122. }
  123. b.opts.tableCapacity = uint64(float64(b.opts.TableSize) * 0.95)
  124. // If encryption or compression is not enabled, do not start compression/encryption goroutines
  125. // and write directly to the buffer.
  126. if b.opts.Compression == options.None && b.opts.DataKey == nil {
  127. return b
  128. }
  129. count := 2 * runtime.NumCPU()
  130. b.blockChan = make(chan *bblock, count*2)
  131. b.wg.Add(count)
  132. for i := 0; i < count; i++ {
  133. go b.handleBlock()
  134. }
  135. return b
  136. }
  137. func maxEncodedLen(ctype options.CompressionType, sz int) int {
  138. switch ctype {
  139. case options.Snappy:
  140. return s2.MaxEncodedLen(sz)
  141. case options.ZSTD:
  142. return y.ZSTDCompressBound(sz)
  143. }
  144. return sz
  145. }
  146. func (b *Builder) handleBlock() {
  147. defer b.wg.Done()
  148. doCompress := b.opts.Compression != options.None
  149. for item := range b.blockChan {
  150. // Extract the block.
  151. blockBuf := item.data[:item.end]
  152. // Compress the block.
  153. if doCompress {
  154. out, err := b.compressData(blockBuf)
  155. y.Check(err)
  156. blockBuf = out
  157. }
  158. if b.shouldEncrypt() {
  159. out, err := b.encrypt(blockBuf)
  160. y.Check(y.Wrapf(err, "Error while encrypting block in table builder."))
  161. blockBuf = out
  162. }
  163. // BlockBuf should always less than or equal to allocated space. If the blockBuf is greater
  164. // than allocated space that means the data from this block cannot be stored in its
  165. // existing location.
  166. allocatedSpace := maxEncodedLen(b.opts.Compression, (item.end)) + padding + 1
  167. y.AssertTrue(len(blockBuf) <= allocatedSpace)
  168. // blockBuf was allocated on allocator. So, we don't need to copy it over.
  169. item.data = blockBuf
  170. item.end = len(blockBuf)
  171. b.compressedSize.Add(uint32(len(blockBuf)))
  172. }
  173. }
  174. // Close closes the TableBuilder.
  175. func (b *Builder) Close() {
  176. b.opts.AllocPool.Return(b.alloc)
  177. }
  178. // Empty returns whether it's empty.
  179. func (b *Builder) Empty() bool { return len(b.keyHashes) == 0 }
  180. // keyDiff returns a suffix of newKey that is different from b.baseKey.
  181. func (b *Builder) keyDiff(newKey []byte) []byte {
  182. var i int
  183. for i = 0; i < len(newKey) && i < len(b.curBlock.baseKey); i++ {
  184. if newKey[i] != b.curBlock.baseKey[i] {
  185. break
  186. }
  187. }
  188. return newKey[i:]
  189. }
  190. func (b *Builder) addHelper(key []byte, v y.ValueStruct, vpLen uint32) {
  191. b.keyHashes = append(b.keyHashes, y.Hash(y.ParseKey(key)))
  192. if version := y.ParseTs(key); version > b.maxVersion {
  193. b.maxVersion = version
  194. }
  195. // diffKey stores the difference of key with baseKey.
  196. var diffKey []byte
  197. if len(b.curBlock.baseKey) == 0 {
  198. // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful
  199. // and will have to make copies of keys every time they add to builder, which is even worse.
  200. b.curBlock.baseKey = append(b.curBlock.baseKey[:0], key...)
  201. diffKey = key
  202. } else {
  203. diffKey = b.keyDiff(key)
  204. }
  205. y.AssertTrue(len(key)-len(diffKey) <= math.MaxUint16)
  206. y.AssertTrue(len(diffKey) <= math.MaxUint16)
  207. h := header{
  208. overlap: uint16(len(key) - len(diffKey)),
  209. diff: uint16(len(diffKey)),
  210. }
  211. // store current entry's offset
  212. b.curBlock.entryOffsets = append(b.curBlock.entryOffsets, uint32(b.curBlock.end))
  213. // Layout: header, diffKey, value.
  214. b.append(h.Encode())
  215. b.append(diffKey)
  216. dst := b.allocate(int(v.EncodedSize()))
  217. v.Encode(dst)
  218. // Add the vpLen to the onDisk size. We'll add the size of the block to
  219. // onDisk size in Finish() function.
  220. b.onDiskSize += vpLen
  221. }
  222. /*
  223. Structure of Block.
  224. +-------------------+---------------------+--------------------+--------------+------------------+
  225. | Entry1 | Entry2 | Entry3 | Entry4 | Entry5 |
  226. +-------------------+---------------------+--------------------+--------------+------------------+
  227. | Entry6 | ... | ... | ... | EntryN |
  228. +-------------------+---------------------+--------------------+--------------+------------------+
  229. | Block Meta(contains list of offsets used| Block Meta Size | Block | Checksum Size |
  230. | to perform binary search in the block) | (4 Bytes) | Checksum | (4 Bytes) |
  231. +-----------------------------------------+--------------------+--------------+------------------+
  232. */
  233. // In case the data is encrypted, the "IV" is added to the end of the block.
  234. func (b *Builder) finishBlock() {
  235. if len(b.curBlock.entryOffsets) == 0 {
  236. return
  237. }
  238. // Append the entryOffsets and its length.
  239. b.append(y.U32SliceToBytes(b.curBlock.entryOffsets))
  240. b.append(y.U32ToBytes(uint32(len(b.curBlock.entryOffsets))))
  241. checksum := b.calculateChecksum(b.curBlock.data[:b.curBlock.end])
  242. // Append the block checksum and its length.
  243. b.append(checksum)
  244. b.append(y.U32ToBytes(uint32(len(checksum))))
  245. b.blockList = append(b.blockList, b.curBlock)
  246. b.uncompressedSize.Add(uint32(b.curBlock.end))
  247. // Add length of baseKey (rounded to next multiple of 4 because of alignment).
  248. // Add another 40 Bytes, these additional 40 bytes consists of
  249. // 12 bytes of metadata of flatbuffer
  250. // 8 bytes for Key in flat buffer
  251. // 8 bytes for offset
  252. // 8 bytes for the len
  253. // 4 bytes for the size of slice while SliceAllocate
  254. b.lenOffsets += uint32(int(math.Ceil(float64(len(b.curBlock.baseKey))/4))*4) + 40
  255. // If compression/encryption is enabled, we need to send the block to the blockChan.
  256. if b.blockChan != nil {
  257. b.blockChan <- b.curBlock
  258. }
  259. }
  260. func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool {
  261. // If there is no entry till now, we will return false.
  262. if len(b.curBlock.entryOffsets) <= 0 {
  263. return false
  264. }
  265. // Integer overflow check for statements below.
  266. y.AssertTrue((uint32(len(b.curBlock.entryOffsets))+1)*4+4+8+4 < math.MaxUint32)
  267. // We should include current entry also in size, that's why +1 to len(b.entryOffsets).
  268. entriesOffsetsSize := uint32((len(b.curBlock.entryOffsets)+1)*4 +
  269. 4 + // size of list
  270. 8 + // Sum64 in checksum proto
  271. 4) // checksum length
  272. estimatedSize := uint32(b.curBlock.end) + uint32(6 /*header size for entry*/) +
  273. uint32(len(key)) + value.EncodedSize() + entriesOffsetsSize
  274. if b.shouldEncrypt() {
  275. // IV is added at the end of the block, while encrypting.
  276. // So, size of IV is added to estimatedSize.
  277. estimatedSize += aes.BlockSize
  278. }
  279. // Integer overflow check for table size.
  280. y.AssertTrue(uint64(b.curBlock.end)+uint64(estimatedSize) < math.MaxUint32)
  281. return estimatedSize > uint32(b.opts.BlockSize)
  282. }
  283. // AddStaleKey is same is Add function but it also increments the internal
  284. // staleDataSize counter. This value will be used to prioritize this table for
  285. // compaction.
  286. func (b *Builder) AddStaleKey(key []byte, v y.ValueStruct, valueLen uint32) {
  287. // Rough estimate based on how much space it will occupy in the SST.
  288. b.staleDataSize += len(key) + len(v.Value) + 4 /* entry offset */ + 4 /* header size */
  289. b.addInternal(key, v, valueLen, true)
  290. }
  291. // Add adds a key-value pair to the block.
  292. func (b *Builder) Add(key []byte, value y.ValueStruct, valueLen uint32) {
  293. b.addInternal(key, value, valueLen, false)
  294. }
  295. func (b *Builder) addInternal(key []byte, value y.ValueStruct, valueLen uint32, isStale bool) {
  296. if b.shouldFinishBlock(key, value) {
  297. if isStale {
  298. // This key will be added to tableIndex and it is stale.
  299. b.staleDataSize += len(key) + 4 /* len */ + 4 /* offset */
  300. }
  301. b.finishBlock()
  302. // Create a new block and start writing.
  303. b.curBlock = &bblock{
  304. data: b.alloc.Allocate(b.opts.BlockSize + padding),
  305. }
  306. }
  307. b.addHelper(key, value, valueLen)
  308. }
  309. // TODO: vvv this was the comment on ReachedCapacity.
  310. // FinalSize returns the *rough* final size of the array, counting the header which is
  311. // not yet written.
  312. // TODO: Look into why there is a discrepancy. I suspect it is because of Write(empty, empty)
  313. // at the end. The diff can vary.
  314. // ReachedCapacity returns true if we... roughly (?) reached capacity?
  315. func (b *Builder) ReachedCapacity() bool {
  316. // If encryption/compression is enabled then use the compresssed size.
  317. sumBlockSizes := b.compressedSize.Load()
  318. if b.opts.Compression == options.None && b.opts.DataKey == nil {
  319. sumBlockSizes = b.uncompressedSize.Load()
  320. }
  321. blocksSize := sumBlockSizes + // actual length of current buffer
  322. uint32(len(b.curBlock.entryOffsets)*4) + // all entry offsets size
  323. 4 + // count of all entry offsets
  324. 8 + // checksum bytes
  325. 4 // checksum length
  326. estimateSz := blocksSize +
  327. 4 + // Index length
  328. b.lenOffsets
  329. return uint64(estimateSz) > b.opts.tableCapacity
  330. }
  331. // Finish finishes the table by appending the index.
  332. /*
  333. The table structure looks like
  334. +---------+------------+-----------+---------------+
  335. | Block 1 | Block 2 | Block 3 | Block 4 |
  336. +---------+------------+-----------+---------------+
  337. | Block 5 | Block 6 | Block ... | Block N |
  338. +---------+------------+-----------+---------------+
  339. | Index | Index Size | Checksum | Checksum Size |
  340. +---------+------------+-----------+---------------+
  341. */
  342. // In case the data is encrypted, the "IV" is added to the end of the index.
  343. func (b *Builder) Finish() []byte {
  344. bd := b.Done()
  345. buf := make([]byte, bd.Size)
  346. written := bd.Copy(buf)
  347. y.AssertTrue(written == len(buf))
  348. return buf
  349. }
  350. type buildData struct {
  351. blockList []*bblock
  352. index []byte
  353. checksum []byte
  354. Size int
  355. alloc *z.Allocator
  356. }
  357. func (bd *buildData) Copy(dst []byte) int {
  358. var written int
  359. for _, bl := range bd.blockList {
  360. written += copy(dst[written:], bl.data[:bl.end])
  361. }
  362. written += copy(dst[written:], bd.index)
  363. written += copy(dst[written:], y.U32ToBytes(uint32(len(bd.index))))
  364. written += copy(dst[written:], bd.checksum)
  365. written += copy(dst[written:], y.U32ToBytes(uint32(len(bd.checksum))))
  366. return written
  367. }
  368. func (b *Builder) Done() buildData {
  369. b.finishBlock() // This will never start a new block.
  370. if b.blockChan != nil {
  371. close(b.blockChan)
  372. }
  373. // Wait for block handler to finish.
  374. b.wg.Wait()
  375. if len(b.blockList) == 0 {
  376. return buildData{}
  377. }
  378. bd := buildData{
  379. blockList: b.blockList,
  380. alloc: b.alloc,
  381. }
  382. var f y.Filter
  383. if b.opts.BloomFalsePositive > 0 {
  384. bits := y.BloomBitsPerKey(len(b.keyHashes), b.opts.BloomFalsePositive)
  385. f = y.NewFilter(b.keyHashes, bits)
  386. }
  387. index, dataSize := b.buildIndex(f)
  388. var err error
  389. if b.shouldEncrypt() {
  390. index, err = b.encrypt(index)
  391. y.Check(err)
  392. }
  393. checksum := b.calculateChecksum(index)
  394. bd.index = index
  395. bd.checksum = checksum
  396. bd.Size = int(dataSize) + len(index) + len(checksum) + 4 + 4
  397. return bd
  398. }
  399. func (b *Builder) calculateChecksum(data []byte) []byte {
  400. // Build checksum for the index.
  401. checksum := pb.Checksum{
  402. // TODO: The checksum type should be configurable from the
  403. // options.
  404. // We chose to use CRC32 as the default option because
  405. // it performed better compared to xxHash64.
  406. // See the BenchmarkChecksum in table_test.go file
  407. // Size => 1024 B 2048 B
  408. // CRC32 => 63.7 ns/op 112 ns/op
  409. // xxHash64 => 87.5 ns/op 158 ns/op
  410. Sum: y.CalculateChecksum(data, pb.Checksum_CRC32C),
  411. Algo: pb.Checksum_CRC32C,
  412. }
  413. // Write checksum to the file.
  414. chksum, err := proto.Marshal(&checksum)
  415. y.Check(err)
  416. // Write checksum size.
  417. return chksum
  418. }
  419. // DataKey returns datakey of the builder.
  420. func (b *Builder) DataKey() *pb.DataKey {
  421. return b.opts.DataKey
  422. }
  423. func (b *Builder) Opts() *Options {
  424. return b.opts
  425. }
  426. // encrypt will encrypt the given data and appends IV to the end of the encrypted data.
  427. // This should be only called only after checking shouldEncrypt method.
  428. func (b *Builder) encrypt(data []byte) ([]byte, error) {
  429. iv, err := y.GenerateIV()
  430. if err != nil {
  431. return data, y.Wrapf(err, "Error while generating IV in Builder.encrypt")
  432. }
  433. needSz := len(data) + len(iv)
  434. dst := b.alloc.Allocate(needSz)
  435. if err = y.XORBlock(dst[:len(data)], data, b.DataKey().Data, iv); err != nil {
  436. return data, y.Wrapf(err, "Error while encrypting in Builder.encrypt")
  437. }
  438. y.AssertTrue(len(iv) == copy(dst[len(data):], iv))
  439. return dst, nil
  440. }
  441. // shouldEncrypt tells us whether to encrypt the data or not.
  442. // We encrypt only if the data key exist. Otherwise, not.
  443. func (b *Builder) shouldEncrypt() bool {
  444. return b.opts.DataKey != nil
  445. }
  446. // compressData compresses the given data.
  447. func (b *Builder) compressData(data []byte) ([]byte, error) {
  448. switch b.opts.Compression {
  449. case options.None:
  450. return data, nil
  451. case options.Snappy:
  452. sz := s2.MaxEncodedLen(len(data))
  453. dst := b.alloc.Allocate(sz)
  454. return s2.EncodeSnappy(dst, data), nil
  455. case options.ZSTD:
  456. sz := y.ZSTDCompressBound(len(data))
  457. dst := b.alloc.Allocate(sz)
  458. return y.ZSTDCompress(dst, data, b.opts.ZSTDCompressionLevel)
  459. }
  460. return nil, errors.New("Unsupported compression type")
  461. }
  462. func (b *Builder) buildIndex(bloom []byte) ([]byte, uint32) {
  463. builder := fbs.NewBuilder(3 << 20)
  464. boList, dataSize := b.writeBlockOffsets(builder)
  465. // Write block offset vector the the idxBuilder.
  466. fb.TableIndexStartOffsetsVector(builder, len(boList))
  467. // Write individual block offsets in reverse order to work around how Flatbuffers expects it.
  468. for i := len(boList) - 1; i >= 0; i-- {
  469. builder.PrependUOffsetT(boList[i])
  470. }
  471. boEnd := builder.EndVector(len(boList))
  472. var bfoff fbs.UOffsetT
  473. // Write the bloom filter.
  474. if len(bloom) > 0 {
  475. bfoff = builder.CreateByteVector(bloom)
  476. }
  477. b.onDiskSize += dataSize
  478. fb.TableIndexStart(builder)
  479. fb.TableIndexAddOffsets(builder, boEnd)
  480. fb.TableIndexAddBloomFilter(builder, bfoff)
  481. fb.TableIndexAddMaxVersion(builder, b.maxVersion)
  482. fb.TableIndexAddUncompressedSize(builder, b.uncompressedSize.Load())
  483. fb.TableIndexAddKeyCount(builder, uint32(len(b.keyHashes)))
  484. fb.TableIndexAddOnDiskSize(builder, b.onDiskSize)
  485. fb.TableIndexAddStaleDataSize(builder, uint32(b.staleDataSize))
  486. builder.Finish(fb.TableIndexEnd(builder))
  487. buf := builder.FinishedBytes()
  488. index := fb.GetRootAsTableIndex(buf, 0)
  489. // Mutate the ondisk size to include the size of the index as well.
  490. y.AssertTrue(index.MutateOnDiskSize(index.OnDiskSize() + uint32(len(buf))))
  491. return buf, dataSize
  492. }
  493. // writeBlockOffsets writes all the blockOffets in b.offsets and returns the
  494. // offsets for the newly written items.
  495. func (b *Builder) writeBlockOffsets(builder *fbs.Builder) ([]fbs.UOffsetT, uint32) {
  496. var startOffset uint32
  497. var uoffs []fbs.UOffsetT
  498. for _, bl := range b.blockList {
  499. uoff := b.writeBlockOffset(builder, bl, startOffset)
  500. uoffs = append(uoffs, uoff)
  501. startOffset += uint32(bl.end)
  502. }
  503. return uoffs, startOffset
  504. }
  505. // writeBlockOffset writes the given key,offset,len triple to the indexBuilder.
  506. // It returns the offset of the newly written blockoffset.
  507. func (b *Builder) writeBlockOffset(
  508. builder *fbs.Builder, bl *bblock, startOffset uint32) fbs.UOffsetT {
  509. // Write the key to the buffer.
  510. k := builder.CreateByteVector(bl.baseKey)
  511. // Build the blockOffset.
  512. fb.BlockOffsetStart(builder)
  513. fb.BlockOffsetAddKey(builder, k)
  514. fb.BlockOffsetAddOffset(builder, startOffset)
  515. fb.BlockOffsetAddLen(builder, uint32(bl.end))
  516. return fb.BlockOffsetEnd(builder)
  517. }