Diffstat (limited to 'src/WorldStorage/WSSAnvil.cpp')
-rw-r--r-- | src/WorldStorage/WSSAnvil.cpp | 60
1 file changed, 41 insertions, 19 deletions
diff --git a/src/WorldStorage/WSSAnvil.cpp b/src/WorldStorage/WSSAnvil.cpp
index e79cc291d..4d2f92173 100644
--- a/src/WorldStorage/WSSAnvil.cpp
+++ b/src/WorldStorage/WSSAnvil.cpp
@@ -66,8 +66,17 @@ Since only the header is actually in the memory, this number can be high, but st
 */
 #define MAX_MCA_FILES 32
 
-/// The maximum size of an inflated chunk; raw chunk data is 192 KiB, allow 64 KiB more of entities
-#define CHUNK_INFLATE_MAX 256 KiB
+#define LOAD_FAILED(CHX, CHZ) \
+	{ \
+		const int RegionX = FAST_FLOOR_DIV(CHX, 32); \
+		const int RegionZ = FAST_FLOOR_DIV(CHZ, 32); \
+		LOGERROR("%s (%d): Loading chunk [%d, %d] from file r.%d.%d.mca failed. " \
+			"The server will now abort in order to avoid further data loss. " \
+			"Please add the reported file and this message to the issue report.", \
+			__FUNCTION__, __LINE__, CHX, CHZ, RegionX, RegionZ \
+		); \
+		*((volatile int *)0) = 0;  /* Crash intentionally */ \
+	}
@@ -248,29 +257,22 @@ cWSSAnvil::cMCAFile * cWSSAnvil::LoadMCAFile(const cChunkCoords & a_Chunk)
 
 bool cWSSAnvil::LoadChunkFromData(const cChunkCoords & a_Chunk, const AString & a_Data)
 {
-	// Decompress the data:
-	char Uncompressed[CHUNK_INFLATE_MAX];
-	z_stream strm;
-	strm.zalloc = (alloc_func)NULL;
-	strm.zfree = (free_func)NULL;
-	strm.opaque = NULL;
-	inflateInit(&strm);
-	strm.next_out = (Bytef *)Uncompressed;
-	strm.avail_out = sizeof(Uncompressed);
-	strm.next_in = (Bytef *)a_Data.data();
-	strm.avail_in = (uInt)a_Data.size();
-	int res = inflate(&strm, Z_FINISH);
-	inflateEnd(&strm);
-	if (res != Z_STREAM_END)
+	// Uncompress the data:
+	AString Uncompressed;
+	int res = InflateString(a_Data.data(), a_Data.size(), Uncompressed);
+	if (res != Z_OK)
 	{
+		LOGWARNING("Uncompressing chunk [%d, %d] failed: %d", a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, res);
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 
 	// Parse the NBT data:
-	cParsedNBT NBT(Uncompressed, strm.total_out);
+	cParsedNBT NBT(Uncompressed.data(), Uncompressed.size());
 	if (!NBT.IsValid())
 	{
 		// NBT Parsing failed
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
@@ -317,11 +319,13 @@ bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT
 	int Level = a_NBT.FindChildByName(0, "Level");
 	if (Level < 0)
 	{
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 	int Sections = a_NBT.FindChildByName(Level, "Sections");
 	if ((Sections < 0) || (a_NBT.GetType(Sections) != TAG_List) || (a_NBT.GetChildrenType(Sections) != TAG_Compound))
 	{
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 	for (int Child = a_NBT.GetFirstChild(Sections); Child >= 0; Child = a_NBT.GetNextSibling(Child))
@@ -2811,30 +2815,42 @@ bool cWSSAnvil::cMCAFile::GetChunkData(const cChunkCoords & a_Chunk, AString & a
 	}
 	unsigned ChunkLocation = ntohl(m_Header[LocalX + 32 * LocalZ]);
 	unsigned ChunkOffset = ChunkLocation >> 8;
+	if (ChunkOffset <= 2)
+	{
+		return false;
+	}
 
 	m_File.Seek((int)ChunkOffset * 4096);
 
 	int ChunkSize = 0;
 	if (m_File.Read(&ChunkSize, 4) != 4)
 	{
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 	ChunkSize = ntohl((u_long)ChunkSize);
 	char CompressionType = 0;
 	if (m_File.Read(&CompressionType, 1) != 1)
 	{
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 	if (CompressionType != 2)
 	{
 		// Chunk is in an unknown compression
+		LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
 		return false;
 	}
 	ChunkSize--;
 
 	// HACK: This depends on the internal knowledge that AString's data() function returns the internal buffer directly
 	a_Data.assign(ChunkSize, '\0');
-	return (m_File.Read((void *)a_Data.data(), ChunkSize) == ChunkSize);
+	if (m_File.Read((void *)a_Data.data(), ChunkSize) == ChunkSize)
+	{
+		return true;
+	}
+	LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
+	return false;
 }
@@ -2889,7 +2905,13 @@ bool cWSSAnvil::cMCAFile::SetChunkData(const cChunkCoords & a_Chunk, const AStri
 
 	// Store the header:
 	ChunkSize = ((u_long)a_Data.size() + MCA_CHUNK_HEADER_LENGTH + 4095) / 4096;  // Round data size *up* to nearest 4KB sector, make it a sector number
-	ASSERT(ChunkSize < 256);
+	if (ChunkSize > 255)
+	{
+		LOGWARNING("Cannot save chunk [%d, %d], the data is too large (%u KiB, maximum is 1024 KiB). Remove some entities and retry.",
+			a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, (unsigned)(ChunkSize * 4)
+		);
+		return false;
+	}
 	m_Header[LocalX + 32 * LocalZ] = htonl((ChunkSector << 8) | ChunkSize);
 	if (m_File.Seek(0) < 0)
 	{
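
Note: the core functional change above replaces the fixed 256 KiB stack buffer with InflateString(), which decompresses into a growable AString and so removes the hard cap on inflated chunk size. The project's real InflateString() helper is defined elsewhere in the codebase and is not shown in this diff; the following is only a minimal sketch of what such a helper can look like when built on plain zlib. The name InflateToString, the std::string output type, the 64 KiB step size and the exact signature are illustrative assumptions, not taken from the patch.

// Minimal sketch of an InflateString-style helper (assumptions: name,
// signature, 64 KiB growth step). Returns Z_OK on success, a zlib error
// code otherwise, which matches the (res != Z_OK) check in LoadChunkFromData().
#include <string>
#include <zlib.h>

int InflateToString(const char * a_Data, size_t a_Length, std::string & a_Out)
{
	a_Out.clear();
	z_stream strm = {};
	if (inflateInit(&strm) != Z_OK)
	{
		return Z_STREAM_ERROR;
	}
	strm.next_in  = reinterpret_cast<Bytef *>(const_cast<char *>(a_Data));
	strm.avail_in = static_cast<uInt>(a_Length);

	char Buffer[64 * 1024];  // Grow the output in 64 KiB steps instead of one fixed-size cap
	int res;
	do
	{
		strm.next_out  = reinterpret_cast<Bytef *>(Buffer);
		strm.avail_out = sizeof(Buffer);
		res = inflate(&strm, Z_NO_FLUSH);
		if ((res != Z_OK) && (res != Z_STREAM_END))
		{
			inflateEnd(&strm);
			return res;  // Z_DATA_ERROR, Z_BUF_ERROR, ...
		}
		a_Out.append(Buffer, sizeof(Buffer) - strm.avail_out);
	} while (res != Z_STREAM_END);
	inflateEnd(&strm);
	return Z_OK;
}

Because the output grows as needed, a corrupted or oversized chunk now surfaces as a zlib error code (handled by LOAD_FAILED) instead of silently overflowing the Z_STREAM_END check on a fixed buffer.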