summaryrefslogtreecommitdiff
path: root/server/flate.go
diff options
context:
space:
mode:
Diffstat (limited to 'server/flate.go')
-rw-r--r--server/flate.go159
1 file changed, 159 insertions, 0 deletions
diff --git a/server/flate.go b/server/flate.go
new file mode 100644
index 0000000..99517c1
--- /dev/null
+++ b/server/flate.go
@@ -0,0 +1,159 @@
+package server
+
+import (
+ "io"
+ "encoding/binary"
+)
+
+// the protocol requires us to send the level as a gzipped flat array. but our
+// level representation is close to run length encoding that can be achieved
+// with DEFLATE. it is thus in theory significantly more CPU efficient to,
+// instead of expanding out the RLE and compressing it with the generic
+// compressor, translate it directly into DEFLATE
+func deflateRuns(
+ wr io.Writer, runs []blockData, changes []blockData,
+ levelSize uint32) error {
+ var err error
+ bw := bitsWriter {wr: wr}
+ bw.Write(0b01, 2) // fixed huffman
+ writeCode(&bw, 0xff & levelSize)
+ writeCode(&bw, 0xff & (levelSize >> 8))
+ writeCode(&bw, 0xff & (levelSize >> 16))
+ err = writeCode(&bw, 0xff & levelSize >> 24)
+ if err != nil {
+ return err
+ }
+ for i, data := range runs {
+ var nextPos uint32
+ if i < len(runs) - 1 {
+ nextPos = runs[i + 1].Pos
+ } else {
+ nextPos = levelSize
+ }
+ curPos := data.Pos
+ for len(changes) > 0 {
+ ch := changes[0]
+ if ch.Pos >= nextPos {
+ break
+ }
+ changes = changes[1:]
+ writeRun(&bw, byte(data.Block), ch.Pos - curPos)
+ err = writeRun(&bw, byte(ch.Block), 1)
+ if err != nil {
+ return err
+ }
+ curPos = ch.Pos + 1
+ }
+ if curPos < nextPos {
+ err = writeRun(&bw, byte(data.Block), nextPos - curPos)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return writeCode(&bw, 256) // end of block
+}
+
+func writeRun(bw *bitsWriter, value byte, totalLen uint32) error {
+ var err error
+ for totalLen > 0 {
+ len := min(totalLen, 258)
+ if len < 3 {
+ for i := 0; i < int(len); i++ {
+ err = writeCode(bw, uint32(value))
+ }
+ } else {
+ writeCode(bw, uint32(value))
+ err = writeRunLength(bw, len)
+ }
+ totalLen -= len
+ }
+ return err
+}
+
+func writeRunLength(bw *bitsWriter, length uint32) error {
+ length -= 3
+ if length < 11 {
+ writeCode(bw, 257 + length)
+ } else if length < 19 {
+ writeCode(bw, 265 + length >> 1)
+ bw.Write(length & 0b1, 1)
+ } else if length < 35 {
+ writeCode(bw, 269 + length >> 2)
+ bw.Write(length & 0b11, 2)
+ } else if length < 115 {
+ writeCode(bw, 273 + length >> 3)
+ bw.Write(length & 0b111, 3)
+ } else if length < 131 {
+ writeCode(bw, 273 + length >> 4)
+ bw.Write(length & 0b1111, 4)
+ } else if length < 285 {
+ writeCode(bw, 281 + length >> 5)
+ bw.Write(length & 0b11111, 5)
+ } else if length == 258 {
+ writeCode(bw, 285)
+ } else {
+ panic("length out of range")
+ }
+ return writeCode(bw, 0)
+}
+
+func writeCode(bw *bitsWriter, v uint32) error {
+ if v < 144 {
+ return bw.Write(0b00110000 + v, 8)
+ } else if v < 256 {
+ return bw.Write(0b110010000 + v, 9)
+ } else if v < 280 {
+ return bw.Write(v, 7)
+ } else {
+ return bw.Write(0b11000000, 8)
+ }
+}
+
+type bitsWriter struct {
+ wr io.Writer
+ buf uint64
+ nbuf byte
+}
+func (bw *bitsWriter) Write(bits uint32, n byte) error {
+ bw.buf |= uint64(bits & ((1<<n)-1)) << bw.nbuf
+ bw.nbuf += n
+ if bw.nbuf < 32 {
+ return nil
+ }
+ var bytes [8]byte
+ binary.LittleEndian.PutUint64(bytes[:], bw.buf)
+ _, err := bw.wr.Write(bytes[:bw.nbuf / 8])
+ bw.buf >>= bw.nbuf / 8
+ bw.nbuf -= bw.nbuf / 8
+ return err
+}
+
+func (bw *bitsWriter) Flush() error {
+ return bw.Write(0, 0)
+}
+
+func writePointlessGzipHeader(wr io.Writer) error {
+ var header = [10]byte {
+ 0x1f, 0x8b,
+ 0x08,
+ 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00,
+ 0x09,
+ }
+ _, err := wr.Write(header[:])
+ return err
+}
+
+func writePointlessGzipTrailer(wr io.Writer, length uint32) error {
+ var trailer = [8]byte {
+ 0, 0, 0, 0,
+ byte(length),
+ byte(length >> 8),
+ byte(length >> 16),
+ byte(length >> 24),
+ }
+ _, err := wr.Write(trailer[:])
+ return err
+}