summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--server/flate.go159
-rw-r--r--server/map.go22
-rw-r--r--server/player.go6
3 files changed, 174 insertions, 13 deletions
diff --git a/server/flate.go b/server/flate.go
new file mode 100644
index 0000000..99517c1
--- /dev/null
+++ b/server/flate.go
@@ -0,0 +1,159 @@
+package server
+
import (
	"encoding/binary"
	"io"
)
+
+// the protocol requires us to send the level as a gzipped flat array. but our
+// level representation is close to run length encoding that can be achieved
+// with DEFLATE. it is thus in theory significantly more CPU efficient to,
+// instead of expanding out the RLE and compressing it with the generic
+// compressor, translate it directly into DEFLATE
+func deflateRuns(
+ wr io.Writer, runs []blockData, changes []blockData,
+ levelSize uint32) error {
+ var err error
+ bw := bitsWriter {wr: wr}
+ bw.Write(0b01, 2) // fixed huffman
+ writeCode(&bw, 0xff & levelSize)
+ writeCode(&bw, 0xff & (levelSize >> 8))
+ writeCode(&bw, 0xff & (levelSize >> 16))
+ err = writeCode(&bw, 0xff & levelSize >> 24)
+ if err != nil {
+ return err
+ }
+ for i, data := range runs {
+ var nextPos uint32
+ if i < len(runs) - 1 {
+ nextPos = runs[i + 1].Pos
+ } else {
+ nextPos = levelSize
+ }
+ curPos := data.Pos
+ for len(changes) > 0 {
+ ch := changes[0]
+ if ch.Pos >= nextPos {
+ break
+ }
+ changes = changes[1:]
+ writeRun(&bw, byte(data.Block), ch.Pos - curPos)
+ err = writeRun(&bw, byte(ch.Block), 1)
+ if err != nil {
+ return err
+ }
+ curPos = ch.Pos + 1
+ }
+ if curPos < nextPos {
+ err = writeRun(&bw, byte(data.Block), nextPos - curPos)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return writeCode(&bw, 256) // end of block
+}
+
+func writeRun(bw *bitsWriter, value byte, totalLen uint32) error {
+ var err error
+ for totalLen > 0 {
+ len := min(totalLen, 258)
+ if len < 3 {
+ for i := 0; i < int(len); i++ {
+ err = writeCode(bw, uint32(value))
+ }
+ } else {
+ writeCode(bw, uint32(value))
+ err = writeRunLength(bw, len)
+ }
+ totalLen -= len
+ }
+ return err
+}
+
+func writeRunLength(bw *bitsWriter, length uint32) error {
+ length -= 3
+ if length < 11 {
+ writeCode(bw, 257 + length)
+ } else if length < 19 {
+ writeCode(bw, 265 + length >> 1)
+ bw.Write(length & 0b1, 1)
+ } else if length < 35 {
+ writeCode(bw, 269 + length >> 2)
+ bw.Write(length & 0b11, 2)
+ } else if length < 115 {
+ writeCode(bw, 273 + length >> 3)
+ bw.Write(length & 0b111, 3)
+ } else if length < 131 {
+ writeCode(bw, 273 + length >> 4)
+ bw.Write(length & 0b1111, 4)
+ } else if length < 285 {
+ writeCode(bw, 281 + length >> 5)
+ bw.Write(length & 0b11111, 5)
+ } else if length == 258 {
+ writeCode(bw, 285)
+ } else {
+ panic("length out of range")
+ }
+ return writeCode(bw, 0)
+}
+
+func writeCode(bw *bitsWriter, v uint32) error {
+ if v < 144 {
+ return bw.Write(0b00110000 + v, 8)
+ } else if v < 256 {
+ return bw.Write(0b110010000 + v, 9)
+ } else if v < 280 {
+ return bw.Write(v, 7)
+ } else {
+ return bw.Write(0b11000000, 8)
+ }
+}
+
+type bitsWriter struct {
+ wr io.Writer
+ buf uint64
+ nbuf byte
+}
+func (bw *bitsWriter) Write(bits uint32, n byte) error {
+ bw.buf |= uint64(bits & ((1<<n)-1)) << bw.nbuf
+ bw.nbuf += n
+ if bw.nbuf < 32 {
+ return nil
+ }
+ var bytes [8]byte
+ binary.LittleEndian.PutUint64(bytes[:], bw.buf)
+ _, err := bw.wr.Write(bytes[:bw.nbuf / 8])
+ bw.buf >>= bw.nbuf / 8
+ bw.nbuf -= bw.nbuf / 8
+ return err
+}
+
+func (bw *bitsWriter) Flush() error {
+ return bw.Write(0, 0)
+}
+
+func writePointlessGzipHeader(wr io.Writer) error {
+ var header = [10]byte {
+ 0x1f, 0x8b,
+ 0x08,
+ 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00,
+ 0x09,
+ }
+ _, err := wr.Write(header[:])
+ return err
+}
+
+func writePointlessGzipTrailer(wr io.Writer, length uint32) error {
+ var trailer = [8]byte {
+ 0, 0, 0, 0,
+ byte(length),
+ byte(length >> 8),
+ byte(length >> 16),
+ byte(length >> 24),
+ }
+ _, err := wr.Write(trailer[:])
+ return err
+}
diff --git a/server/map.go b/server/map.go
index e84ca55..f210d42 100644
--- a/server/map.go
+++ b/server/map.go
@@ -95,6 +95,9 @@ func (v *blockVolume) unsyncSetBlock(p blockPos, t blockType) {
}
func (v *blockVolume) unsyncFlushChanges() {
+ if len(v.changes) == 0 {
+ return
+ }
var (
oldRuns = v.blockRuns
changes = v.changes
@@ -198,21 +201,14 @@ func (v *blockVolume) unsyncBlocksInRange(min, max blockPos) []blockType {
func (v *blockVolume) syncCompressForNetwork(/*highBits bool*/) io.ReadCloser {
rd, wr := io.Pipe()
go func() {
- defer wr.Close()
- z := gzip.NewWriter(wr)
- defer z.Close()
- bw := bufio.NewWriter(z)
- defer bw.Flush()
-
v.RLock()
defer v.RUnlock()
- binary.Write(bw, binary.BigEndian, uint32(v.size.X*v.size.Y*v.size.Z))
- for block := range v.unsyncGetAll() {
- err := bw.WriteByte(byte(block))
- if err != nil {
- return
- }
- }
+ bw := bufio.NewWriter(wr)
+ defer bw.Flush()
+ writePointlessGzipHeader(bw)
+ len := uint32(v.size.X*v.size.Y*v.size.Z)
+ deflateRuns(wr, v.blockRuns, v.changes, len)
+ writePointlessGzipTrailer(bw, len)
}()
return rd
}
diff --git a/server/player.go b/server/player.go
index d2c6722..a6219cc 100644
--- a/server/player.go
+++ b/server/player.go
@@ -272,6 +272,12 @@ func (p *player) OnLevelData(from *level, info levelInfo, data io.ReadCloser) {
}
packets := func(yield func(classic.Packet) bool) {
defer data.Close()
+ dbg, err := os.OpenFile("./debug.gz", os.O_CREATE | os.O_WRONLY, 0777)
+ if err != nil {
+ panic(err)
+ }
+ defer dbg.Close()
+ data := io.TeeReader(data, dbg)
for {
var packet classic.LevelDataChunk
n, err := io.ReadFull(data, packet.Data[:])