From 096cdac80d222ac0be2a0554a759a0b16c1c34f6 Mon Sep 17 00:00:00 2001
From: peterbell10
Date: Mon, 21 Aug 2017 17:56:53 +0100
Subject: Implement protocol level chunk sparsing (#3864)

---
 src/Protocol/ChunkDataSerializer.cpp | 347 ++++++++++++++++++-----------------
 src/Protocol/ChunkDataSerializer.h   |  15 +-
 2 files changed, 186 insertions(+), 176 deletions(-)

(limited to 'src/Protocol')

diff --git a/src/Protocol/ChunkDataSerializer.cpp b/src/Protocol/ChunkDataSerializer.cpp
index bbedb3c59..b29981864 100644
--- a/src/Protocol/ChunkDataSerializer.cpp
+++ b/src/Protocol/ChunkDataSerializer.cpp
@@ -15,18 +15,34 @@
 
 
 
+
+/** Calls the given function with every present chunk section. */
+template <class Func>
+void ForEachSection(const cChunkData & a_Data, Func a_Func)
+{
+	for (size_t SectionIdx = 0; SectionIdx < cChunkData::NumSections; ++SectionIdx)
+	{
+		auto Section = a_Data.GetSection(SectionIdx);
+		if (Section != nullptr)
+		{
+			a_Func(*Section);
+		}
+	}
+}
+
+
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+// cChunkDataSerializer:
+
 cChunkDataSerializer::cChunkDataSerializer(
-	const cChunkDef::BlockTypes & a_BlockTypes,
-	const cChunkDef::BlockNibbles & a_BlockMetas,
-	const cChunkDef::BlockNibbles & a_BlockLight,
-	const cChunkDef::BlockNibbles & a_BlockSkyLight,
-	const unsigned char * a_BiomeData,
-	const eDimension a_Dimension
-) :
-	m_BlockTypes(a_BlockTypes),
-	m_BlockMetas(a_BlockMetas),
-	m_BlockLight(a_BlockLight),
-	m_BlockSkyLight(a_BlockSkyLight),
+	const cChunkData & a_Data,
+	const unsigned char * a_BiomeData,
+	const eDimension a_Dimension
+):
+	m_Data(a_Data),
 	m_BiomeData(a_BiomeData),
 	m_Dimension(a_Dimension)
 {
@@ -35,6 +51,7 @@ cChunkDataSerializer::cChunkDataSerializer(
 
 
 
+
 const AString & cChunkDataSerializer::Serialize(int a_Version, int a_ChunkX, int a_ChunkZ)
 {
 	Serializations::const_iterator itr = m_Serializations.find(a_Version);
@@ -68,6 +85,7 @@ const AString & cChunkDataSerializer::Serialize(int a_Version, int a_ChunkX, int
 
 
 
+
 void cChunkDataSerializer::Serialize47(AString & a_Data, int a_ChunkX, int a_ChunkZ)
 {
 	// This function returns the fully compressed packet (including packet size), not the raw packet!
@@ -77,32 +95,49 @@ void cChunkDataSerializer::Serialize47(AString & a_Data, int a_ChunkX, int a_Chu
 	Packet.WriteVarInt32(0x21);  // Packet id (Chunk Data packet)
 	Packet.WriteBEInt32(a_ChunkX);
 	Packet.WriteBEInt32(a_ChunkZ);
-	Packet.WriteBool(true);        // "Ground-up continuous", or rather, "biome data present" flag
-	Packet.WriteBEUInt16(0xffff);  // We're aways sending the full chunk with no additional data, so the bitmap is 0xffff
+	Packet.WriteBool(true);  // "Ground-up continuous", or rather, "biome data present" flag
+	Packet.WriteBEUInt16(m_Data.GetSectionBitmask());
 
 	// Write the chunk size:
 	const int BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
 	UInt32 ChunkSize = (
-		(cChunkDef::NumBlocks * 2) +  // Block meta + type
-		sizeof(m_BlockLight) +        // Block light
-		sizeof(m_BlockSkyLight) +     // Block sky light
-		BiomeDataSize                 // Biome data
+		m_Data.NumPresentSections() * cChunkData::SectionBlockCount * 3 +  // Blocks and lighting
+		BiomeDataSize  // Biome data
 	);
 	Packet.WriteVarInt32(ChunkSize);
 
+	// Chunk written as separate arrays of (blocktype + meta), blocklight and skylight
+	// each array stores all present sections of the same kind packed together
+
 	// Write the block types to the packet:
-	for (size_t Index = 0; Index < cChunkDef::NumBlocks; Index++)
-	{
-		BLOCKTYPE BlockType = m_BlockTypes[Index] & 0xFF;
-		NIBBLETYPE BlockMeta = m_BlockMetas[Index / 2] >> ((Index & 1) * 4) & 0x0f;
-		Packet.WriteBEUInt8(static_cast<unsigned char>(BlockType << 4) | BlockMeta);
-		Packet.WriteBEUInt8(static_cast<unsigned char>(BlockType >> 4));
-	}
+	ForEachSection(m_Data, [&](const cChunkData::sChunkSection & a_Section)
+		{
+			for (size_t BlockIdx = 0; BlockIdx != cChunkData::SectionBlockCount; ++BlockIdx)
+			{
+				BLOCKTYPE BlockType = a_Section.m_BlockTypes[BlockIdx] & 0xFF;
+				NIBBLETYPE BlockMeta = a_Section.m_BlockMetas[BlockIdx / 2] >> ((BlockIdx & 1) * 4) & 0x0f;
+				Packet.WriteBEUInt8(static_cast<unsigned char>(BlockType << 4) | BlockMeta);
+				Packet.WriteBEUInt8(static_cast<unsigned char>(BlockType >> 4));
+			}
+		}
+	);
+
+	// Write the block lights:
+	ForEachSection(m_Data, [&](const cChunkData::sChunkSection & a_Section)
+		{
+			Packet.WriteBuf(a_Section.m_BlockLight, sizeof(a_Section.m_BlockLight));
+		}
+	);
 
-	// Write the rest:
-	Packet.WriteBuf(m_BlockLight, sizeof(m_BlockLight));
-	Packet.WriteBuf(m_BlockSkyLight, sizeof(m_BlockSkyLight));
-	Packet.WriteBuf(m_BiomeData, BiomeDataSize);
+	// Write the sky lights:
+	ForEachSection(m_Data, [&](const cChunkData::sChunkSection & a_Section)
+		{
+			Packet.WriteBuf(a_Section.m_BlockSkyLight, sizeof(a_Section.m_BlockSkyLight));
+		}
+	);
+
+	// Write the biome data:
+	Packet.WriteBuf(m_BiomeData, BiomeDataSize);
 
 	AString PacketData;
 	Packet.ReadAll(PacketData);
@@ -147,102 +182,92 @@ void cChunkDataSerializer::Serialize107(AString & a_Data, int a_ChunkX, int a_Ch
 	Packet.WriteBEInt32(a_ChunkX);
 	Packet.WriteBEInt32(a_ChunkZ);
 	Packet.WriteBool(true);  // "Ground-up continuous", or rather, "biome data present" flag
-	Packet.WriteVarInt32(0x0000ffff);  // We're aways sending the full chunk with no additional data, so the bitmap is 0xffff
+	Packet.WriteVarInt32(m_Data.GetSectionBitmask());
 
 	// Write the chunk size:
-	const size_t NumChunkSections = 16;
-	const size_t ChunkSectionBlocks = 16 * 16 * 16;
 	const size_t BitsPerEntry = 13;
 	const size_t Mask = (1 << BitsPerEntry) - 1;  // Creates a mask that is 13 bits long, ie 0b1111111111111
-	const size_t ChunkSectionDataArraySize = (ChunkSectionBlocks * BitsPerEntry) / 8 / 8;  // Convert from bit count to long count
+	const size_t ChunkSectionDataArraySize = (cChunkData::SectionBlockCount * BitsPerEntry) / 8 / 8;  // Convert from bit count to long count
 	size_t ChunkSectionSize = (
-		1 +                                        // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
-		1 +                                        // Palette length
-		2 +                                        // Data array length VarInt - 2 bytes for the current value
-		ChunkSectionDataArraySize * 8 +            // Actual block data - multiplied by 8 because first number is longs
-		sizeof(m_BlockLight) / NumChunkSections    // Block light
+		1 +  // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
+		1 +  // Palette length
+		2 +  // Data array length VarInt - 2 bytes for the current value
+		ChunkSectionDataArraySize * 8 +  // Actual block data - multiplied by 8 because first number is longs
+		cChunkData::SectionBlockCount / 2  // Block light
 	);
 
 	if (m_Dimension == dimOverworld)
 	{
 		// Sky light is only sent in the overworld.
-		ChunkSectionSize += sizeof(m_BlockSkyLight) / NumChunkSections;
+		ChunkSectionSize += cChunkData::SectionBlockCount / 2;
 	}
 
 	const size_t BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
 	size_t ChunkSize = (
-		ChunkSectionSize * 16 +
+		ChunkSectionSize * m_Data.NumPresentSections() +
 		BiomeDataSize
 	);
 	Packet.WriteVarInt32(static_cast<UInt32>(ChunkSize));
 
 	// Write each chunk section...
-	for (size_t SectionIndex = 0; SectionIndex < 16; SectionIndex++)
-	{
-		Packet.WriteBEUInt8(BitsPerEntry);
-		Packet.WriteVarInt32(0);  // Palette length is 0
-		Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
-
-		size_t StartIndex = SectionIndex * ChunkSectionBlocks;
-
-		UInt64 TempLong = 0;  // Temporary value that will be stored into
-		UInt64 CurrentlyWrittenIndex = 0;  // "Index" of the long that would be written to
-
-		for (size_t Index = 0; Index < ChunkSectionBlocks; Index++)
+	ForEachSection(m_Data, [&](const cChunkData::sChunkSection & a_Section)
 		{
-			UInt64 Value = static_cast<UInt64>(m_BlockTypes[StartIndex + Index] << 4);
-			if (Index % 2 == 0)
-			{
-				Value |= m_BlockMetas[(StartIndex + Index) / 2] & 0x0f;
-			}
-			else
-			{
-				Value |= m_BlockMetas[(StartIndex + Index) / 2] >> 4;
-			}
-			Value &= Mask;  // It shouldn't go out of bounds, but it's still worth being careful
-
-			// Painful part where we write data into the long array. Based off of the normal code.
-			size_t BitPosition = Index * BitsPerEntry;
-			size_t FirstIndex = BitPosition / 64;
-			size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
-			size_t BitOffset = BitPosition % 64;
-
-			if (FirstIndex != CurrentlyWrittenIndex)
-			{
-				// Write the current data before modifiying it.
-				Packet.WriteBEUInt64(TempLong);
-				TempLong = 0;
-				CurrentlyWrittenIndex = FirstIndex;
-			}
+			Packet.WriteBEUInt8(BitsPerEntry);
+			Packet.WriteVarInt32(0);  // Palette length is 0
+			Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
 
-			TempLong |= (Value << BitOffset);
+			UInt64 TempLong = 0;  // Temporary value that will be stored into
+			UInt64 CurrentlyWrittenIndex = 0;  // "Index" of the long that would be written to
 
-			if (FirstIndex != SecondIndex)
+			for (size_t Index = 0; Index < cChunkData::SectionBlockCount; Index++)
 			{
-				// Part of the data is now in the second long; write the first one first
-				Packet.WriteBEUInt64(TempLong);
-				CurrentlyWrittenIndex = SecondIndex;
-
-				TempLong = (Value >> (64 - BitOffset));
+				UInt64 Value = static_cast<UInt64>(a_Section.m_BlockTypes[Index] << 4);
+				if (Index % 2 == 0)
+				{
+					Value |= a_Section.m_BlockMetas[Index / 2] & 0x0f;
+				}
+				else
+				{
+					Value |= a_Section.m_BlockMetas[Index / 2] >> 4;
+				}
+				Value &= Mask;  // It shouldn't go out of bounds, but it's still worth being careful
+
+				// Painful part where we write data into the long array. Based off of the normal code.
+				size_t BitPosition = Index * BitsPerEntry;
+				size_t FirstIndex = BitPosition / 64;
+				size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
+				size_t BitOffset = BitPosition % 64;
+
+				if (FirstIndex != CurrentlyWrittenIndex)
+				{
+					// Write the current data before modifying it.
+					Packet.WriteBEUInt64(TempLong);
+					TempLong = 0;
+					CurrentlyWrittenIndex = FirstIndex;
+				}
+
+				TempLong |= (Value << BitOffset);
+
+				if (FirstIndex != SecondIndex)
+				{
+					// Part of the data is now in the second long; write the first one first
+					Packet.WriteBEUInt64(TempLong);
+					CurrentlyWrittenIndex = SecondIndex;
+
+					TempLong = (Value >> (64 - BitOffset));
+				}
 			}
-		}
 
-		// The last long will generally not be written
-		Packet.WriteBEUInt64(TempLong);
+			// The last long will generally not be written
+			Packet.WriteBEUInt64(TempLong);
 
-		// Light - stored as a nibble, so we need half sizes
-		// As far as I know, there isn't a method to only write a range of the array
-		for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
-		{
-			Packet.WriteBEUInt8(m_BlockLight[(StartIndex / 2) + Index]);
-		}
-		if (m_Dimension == dimOverworld)
-		{
-			// Skylight is only sent in the overworld; the nether and end do not use it
-			for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
+			// Write lighting:
+			Packet.WriteBuf(a_Section.m_BlockLight, sizeof(a_Section.m_BlockLight));
+			if (m_Dimension == dimOverworld)
 			{
-				Packet.WriteBEUInt8(m_BlockSkyLight[(StartIndex / 2) + Index]);
+				// Skylight is only sent in the overworld; the nether and end do not use it
+				Packet.WriteBuf(a_Section.m_BlockSkyLight, sizeof(a_Section.m_BlockSkyLight));
 			}
 		}
-	}
+	);
 
 	// Write the biome data
 	Packet.WriteBuf(m_BiomeData, BiomeDataSize);
@@ -290,102 +315,92 @@ void cChunkDataSerializer::Serialize110(AString & a_Data, int a_ChunkX, int a_Ch
 	Packet.WriteBEInt32(a_ChunkX);
 	Packet.WriteBEInt32(a_ChunkZ);
 	Packet.WriteBool(true);  // "Ground-up continuous", or rather, "biome data present" flag
-	Packet.WriteVarInt32(0x0000ffff);  // We're aways sending the full chunk with no additional data, so the bitmap is 0xffff
+	Packet.WriteVarInt32(m_Data.GetSectionBitmask());
 
 	// Write the chunk size:
-	const size_t NumChunkSections = 16;
-	const size_t ChunkSectionBlocks = 16 * 16 * 16;
 	const size_t BitsPerEntry = 13;
 	const size_t Mask = (1 << BitsPerEntry) - 1;  // Creates a mask that is 13 bits long, ie 0b1111111111111
-	const size_t ChunkSectionDataArraySize = (ChunkSectionBlocks * BitsPerEntry) / 8 / 8;  // Convert from bit count to long count
+	const size_t ChunkSectionDataArraySize = (cChunkData::SectionBlockCount * BitsPerEntry) / 8 / 8;  // Convert from bit count to long count
 	size_t ChunkSectionSize = (
-		1 +                                        // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
-		1 +                                        // Palette length
-		2 +                                        // Data array length VarInt - 2 bytes for the current value
-		ChunkSectionDataArraySize * 8 +            // Actual block data - multiplied by 8 because first number is longs
-		sizeof(m_BlockLight) / NumChunkSections    // Block light
+		1 +  // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
+		1 +  // Palette length
+		2 +  // Data array length VarInt - 2 bytes for the current value
+		ChunkSectionDataArraySize * 8 +  // Actual block data - multiplied by 8 because first number is longs
+		cChunkData::SectionBlockCount / 2  // Block light
 	);
 
 	if (m_Dimension == dimOverworld)
 	{
 		// Sky light is only sent in the overworld.
-		ChunkSectionSize += sizeof(m_BlockSkyLight) / NumChunkSections;
+		ChunkSectionSize += cChunkData::SectionBlockCount / 2;
 	}
 
 	const size_t BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
 	size_t ChunkSize = (
-		ChunkSectionSize * 16 +
+		ChunkSectionSize * m_Data.NumPresentSections() +
 		BiomeDataSize
 	);
 	Packet.WriteVarInt32(static_cast<UInt32>(ChunkSize));
 
 	// Write each chunk section...
-	for (size_t SectionIndex = 0; SectionIndex < 16; SectionIndex++)
-	{
-		Packet.WriteBEUInt8(BitsPerEntry);
-		Packet.WriteVarInt32(0);  // Palette length is 0
-		Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
-
-		size_t StartIndex = SectionIndex * ChunkSectionBlocks;
-
-		UInt64 TempLong = 0;  // Temporary value that will be stored into
-		UInt64 CurrentlyWrittenIndex = 0;  // "Index" of the long that would be written to
-
-		for (size_t Index = 0; Index < ChunkSectionBlocks; Index++)
+	ForEachSection(m_Data, [&](const cChunkData::sChunkSection & a_Section)
 		{
-			UInt64 Value = static_cast<UInt64>(m_BlockTypes[StartIndex + Index] << 4);
-			if (Index % 2 == 0)
-			{
-				Value |= m_BlockMetas[(StartIndex + Index) / 2] & 0x0f;
-			}
-			else
-			{
-				Value |= m_BlockMetas[(StartIndex + Index) / 2] >> 4;
-			}
-			Value &= Mask;  // It shouldn't go out of bounds, but it's still worth being careful
+			Packet.WriteBEUInt8(BitsPerEntry);
+			Packet.WriteVarInt32(0);  // Palette length is 0
+			Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
 
-			// Painful part where we write data into the long array. Based off of the normal code.
-			size_t BitPosition = Index * BitsPerEntry;
-			size_t FirstIndex = BitPosition / 64;
-			size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
-			size_t BitOffset = BitPosition % 64;
+			UInt64 TempLong = 0;  // Temporary value that will be stored into
+			UInt64 CurrentlyWrittenIndex = 0;  // "Index" of the long that would be written to
 
-			if (FirstIndex != CurrentlyWrittenIndex)
+			for (size_t Index = 0; Index < cChunkData::SectionBlockCount; Index++)
 			{
-				// Write the current data before modifiying it.
-				Packet.WriteBEUInt64(TempLong);
-				TempLong = 0;
-				CurrentlyWrittenIndex = FirstIndex;
+				UInt64 Value = static_cast<UInt64>(a_Section.m_BlockTypes[Index] << 4);
+				if (Index % 2 == 0)
+				{
+					Value |= a_Section.m_BlockMetas[Index / 2] & 0x0f;
+				}
+				else
+				{
+					Value |= a_Section.m_BlockMetas[Index / 2] >> 4;
+				}
+				Value &= Mask;  // It shouldn't go out of bounds, but it's still worth being careful
+
+				// Painful part where we write data into the long array. Based off of the normal code.
+				size_t BitPosition = Index * BitsPerEntry;
+				size_t FirstIndex = BitPosition / 64;
+				size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
+				size_t BitOffset = BitPosition % 64;
+
+				if (FirstIndex != CurrentlyWrittenIndex)
+				{
+					// Write the current data before modifying it.
+					Packet.WriteBEUInt64(TempLong);
+					TempLong = 0;
+					CurrentlyWrittenIndex = FirstIndex;
+				}
+
+				TempLong |= (Value << BitOffset);
+
+				if (FirstIndex != SecondIndex)
+				{
+					// Part of the data is now in the second long; write the first one first
+					Packet.WriteBEUInt64(TempLong);
+					CurrentlyWrittenIndex = SecondIndex;
+
+					TempLong = (Value >> (64 - BitOffset));
+				}
 			}
+			// The last long will generally not be written
+			Packet.WriteBEUInt64(TempLong);
 
-			TempLong |= (Value << BitOffset);
-
-			if (FirstIndex != SecondIndex)
+			// Write lighting:
+			Packet.WriteBuf(a_Section.m_BlockLight, sizeof(a_Section.m_BlockLight));
+			if (m_Dimension == dimOverworld)
 			{
-				// Part of the data is now in the second long; write the first one first
-				Packet.WriteBEUInt64(TempLong);
-				CurrentlyWrittenIndex = SecondIndex;
-
-				TempLong = (Value >> (64 - BitOffset));
+				// Skylight is only sent in the overworld; the nether and end do not use it
+				Packet.WriteBuf(a_Section.m_BlockSkyLight, sizeof(a_Section.m_BlockSkyLight));
 			}
 		}
-		// The last long will generally not be written
-		Packet.WriteBEUInt64(TempLong);
-
-		// Light - stored as a nibble, so we need half sizes
-		// As far as I know, there isn't a method to only write a range of the array
-		for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
-		{
-			Packet.WriteBEUInt8(m_BlockLight[(StartIndex / 2) + Index]);
-		}
-		if (m_Dimension == dimOverworld)
-		{
-			// Skylight is only sent in the overworld; the nether and end do not use it
-			for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
-			{
-				Packet.WriteBEUInt8(m_BlockSkyLight[(StartIndex / 2) + Index]);
-			}
-		}
-	}
+	);
 
 	// Write the biome data
 	Packet.WriteBuf(m_BiomeData, BiomeDataSize);
diff --git a/src/Protocol/ChunkDataSerializer.h b/src/Protocol/ChunkDataSerializer.h
index 26cbd564a..45b0431ab 100644
--- a/src/Protocol/ChunkDataSerializer.h
+++ b/src/Protocol/ChunkDataSerializer.h
@@ -5,6 +5,7 @@
 // - serialize chunk data to different protocol versions
 // - cache such serialized data for multiple clients
 
+#include "ChunkData.h"
 
 
 
@@ -12,10 +13,7 @@
 class cChunkDataSerializer
 {
 protected:
-	const cChunkDef::BlockTypes & m_BlockTypes;
-	const cChunkDef::BlockNibbles & m_BlockMetas;
-	const cChunkDef::BlockNibbles & m_BlockLight;
-	const cChunkDef::BlockNibbles & m_BlockSkyLight;
+	const cChunkData & m_Data;
 	const unsigned char * m_BiomeData;
 	const eDimension m_Dimension;
 
@@ -36,12 +34,9 @@ public:
 	} ;
 
 	cChunkDataSerializer(
-		const cChunkDef::BlockTypes & a_BlockTypes,
-		const cChunkDef::BlockNibbles & a_BlockMetas,
-		const cChunkDef::BlockNibbles & a_BlockLight,
-		const cChunkDef::BlockNibbles & a_BlockSkyLight,
-		const unsigned char * a_BiomeData,
-		const eDimension a_Dimension
+		const cChunkData & a_Data,
+		const unsigned char * a_BiomeData,
+		const eDimension a_Dimension
 	);
 
 	const AString & Serialize(int a_Version, int a_ChunkX, int a_ChunkZ);  // Returns one of the internal m_Serializations[]
--
cgit v1.2.3
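
The patch carries no commentary beyond its subject line, so a brief illustration of the two ideas it relies on may help: the section bitmask tells the client which 16x16x16 sections are actually present (absent sections are skipped entirely), and each present section's (blocktype << 4 | meta) entries are packed 13 bits at a time into 64-bit longs. The standalone sketch below uses made-up stand-in types and helper names - it is not Cuberite code - and only its arithmetic mirrors the Serialize107 / Serialize110 loops.

// chunk_sparsing_sketch.cpp -- illustrative stand-ins only, not Cuberite code.
#include <array>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

constexpr size_t NumSections = 16;
constexpr size_t SectionBlockCount = 16 * 16 * 16;
constexpr size_t BitsPerEntry = 13;
constexpr uint64_t EntryMask = (1u << BitsPerEntry) - 1;

// A sparse chunk: absent sections are simply not stored.
using Section = std::array<uint16_t, SectionBlockCount>;  // each entry is (blocktype << 4) | meta
using Chunk = std::array<std::optional<Section>, NumSections>;

// Bit N is set when section N is present; the client uses this bitmask to know
// which sections follow in the Chunk Data packet.
uint16_t GetSectionBitmask(const Chunk & a_Chunk)
{
	uint16_t Bitmask = 0;
	for (size_t i = 0; i < NumSections; ++i)
	{
		if (a_Chunk[i].has_value())
		{
			Bitmask |= static_cast<uint16_t>(1u << i);
		}
	}
	return Bitmask;
}

// Packs one section's 13-bit entries into 64-bit longs.  Same arithmetic as the
// TempLong / FirstIndex / SecondIndex loop in the patch, but accumulated into an
// array instead of being streamed.
std::vector<uint64_t> PackSection(const Section & a_Section)
{
	std::vector<uint64_t> Longs((SectionBlockCount * BitsPerEntry) / 64);  // 832 longs
	for (size_t Index = 0; Index < SectionBlockCount; ++Index)
	{
		uint64_t Value = a_Section[Index] & EntryMask;
		size_t BitPosition = Index * BitsPerEntry;
		size_t FirstLong = BitPosition / 64;
		size_t SecondLong = ((Index + 1) * BitsPerEntry - 1) / 64;
		size_t BitOffset = BitPosition % 64;
		Longs[FirstLong] |= Value << BitOffset;
		if (FirstLong != SecondLong)
		{
			// The entry straddles a long boundary; the high bits spill into the next long.
			Longs[SecondLong] |= Value >> (64 - BitOffset);
		}
	}
	return Longs;
}

int main()
{
	Chunk MyChunk{};
	MyChunk[0].emplace();          // only section 0 is present...
	(*MyChunk[0])[0] = (1u << 4);  // ...holding one stone block (type 1, meta 0)
	std::printf("bitmask = 0x%04x\n", static_cast<unsigned>(GetSectionBitmask(MyChunk)));  // 0x0001
	std::printf("longs per section = %zu\n", PackSection(*MyChunk[0]).size());             // 832
}

In the real serializer the longs are written big-endian with WriteBEUInt64 as they fill up (the TempLong / CurrentlyWrittenIndex bookkeeping), rather than being collected into an array first, and only present sections are visited via ForEachSection.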