path: root/core/src/main/scala/spark/network/netty/FileHeader.scala
package spark.network.netty

import io.netty.buffer._

import spark.Logging

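/**
 * Header sent ahead of a file's contents, carrying the file's length and the
 * ID of the block being transferred.
 */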
private[spark] class FileHeader(
  val fileLen: Int,
  val blockId: String) extends Logging {

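  /**
   * This header serialized into a fixed-size buffer: 4 bytes of file length,
   * 4 bytes of block ID length, the block ID's characters, then zero padding
   * up to HEADER_SIZE bytes.
   */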
  lazy val buffer = {
    val buf = Unpooled.buffer()
    buf.capacity(FileHeader.HEADER_SIZE)
    buf.writeInt(fileLen)
    buf.writeInt(blockId.length)
    blockId.foreach((x: Char) => buf.writeByte(x))
    // Pad the remainder of the header with zeros; a header that already
    // exceeds HEADER_SIZE cannot be represented, so fail loudly.
    val padding = FileHeader.HEADER_SIZE - buf.readableBytes
    if (padding >= 0) {
      buf.writeZero(padding)
    } else {
      logInfo("too long header: " + buf.readableBytes + " bytes")
      throw new Exception("too long header: " + buf.readableBytes + " bytes")
    }
    buf
  }

}

private[spark] object FileHeader {

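  // Fixed size, in bytes, of every serialized header.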
  val HEADER_SIZE = 40

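  // Offset and size (in bytes) of the file-length field within the header.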
  def getFileLenOffset = 0
  def getFileLenSize = Integer.SIZE / 8

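  /** Reconstruct a FileHeader by reading its fields back out of a ByteBuf. */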
  def create(buf: ByteBuf): FileHeader = {
    val length = buf.readInt
    val idLength = buf.readInt
    val idBuilder = new StringBuilder(idLength)
    for (i <- 1 to idLength) {
      idBuilder += buf.readByte().toChar
    }
    val blockId = idBuilder.toString()
    new FileHeader(length, blockId)
  }


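  // Simple round-trip check: serialize a header, parse it back, and print the result.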
  def main(args: Array[String]) {
    val header = new FileHeader(25, "block_0")
    val buf = header.buffer
    val newHeader = FileHeader.create(buf)
    println("id=" + newHeader.blockId + ",size=" + newHeader.fileLen)
  }
}