author:    madmaxoft@gmail.com   2012-02-16 14:42:35 +0100
committer: madmaxoft@gmail.com   2012-02-16 14:42:35 +0100
commit:    423f49d175d8db6283232bd8bdc106e26bdcff4d (patch)
tree:      5c5cc58d1061c91d2cb39021853ce72ab59839ae /source/WSSCompact.cpp
parent:    Chunks are properly saved before being unloaded now (diff)
Diffstat (limited to 'source/WSSCompact.cpp')
-rw-r--r--   source/WSSCompact.cpp   189
1 file changed, 139 insertions, 50 deletions
diff --git a/source/WSSCompact.cpp b/source/WSSCompact.cpp
index b2c2d0bb9..c023875a5 100644
--- a/source/WSSCompact.cpp
+++ b/source/WSSCompact.cpp
@@ -8,6 +8,11 @@
#include "cWorld.h"
#include "zlib.h"
#include <json/json.h>
+#include "StringCompression.h"
+#include "cChestEntity.h"
+#include "cSignEntity.h"
+#include "cFurnaceEntity.h"
+#include "BlockID.h"
@@ -53,7 +58,7 @@ cWSSCompact::~cWSSCompact()
-bool cWSSCompact::LoadChunk(const cChunkPtr & a_Chunk)
+bool cWSSCompact::LoadChunk(const cChunkCoords & a_Chunk)
{
cPAKFile * f = LoadPAKFile(a_Chunk);
if (f == NULL)
@@ -62,14 +67,14 @@ bool cWSSCompact::LoadChunk(const cChunkPtr & a_Chunk)
return false;
}
- return f->LoadChunk(a_Chunk);
+ return f->LoadChunk(a_Chunk, m_World);
}
-bool cWSSCompact::SaveChunk(const cChunkPtr & a_Chunk)
+bool cWSSCompact::SaveChunk(const cChunkCoords & a_Chunk)
{
cPAKFile * f = LoadPAKFile(a_Chunk);
if (f == NULL)
@@ -77,18 +82,18 @@ bool cWSSCompact::SaveChunk(const cChunkPtr & a_Chunk)
// For some reason we couldn't locate the file
return false;
}
- return f->SaveChunk(a_Chunk);
+ return f->SaveChunk(a_Chunk, m_World);
}
-cWSSCompact::cPAKFile * cWSSCompact::LoadPAKFile(const cChunkPtr & a_Chunk)
+cWSSCompact::cPAKFile * cWSSCompact::LoadPAKFile(const cChunkCoords & a_Chunk)
{
// We need to retain this weird conversion code, because some edge chunks are in the wrong PAK file
- const int LayerX = (int)(floorf((float)a_Chunk->GetPosX() / 32.0f));
- const int LayerZ = (int)(floorf((float)a_Chunk->GetPosZ() / 32.0f));
+ const int LayerX = (int)(floorf((float)a_Chunk.m_ChunkX / 32.0f));
+ const int LayerZ = (int)(floorf((float)a_Chunk.m_ChunkZ / 32.0f));
// Is it already cached?
for (cPAKFiles::iterator itr = m_PAKFiles.begin(); itr != m_PAKFiles.end(); ++itr)
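[Editor's note on the hunk above, with a standalone sketch:] The floorf() round-trip differs from plain integer division for negative coordinates: integer division in C++ truncates toward zero, while floorf() rounds toward negative infinity. That mismatch is presumably how some edge chunks ended up in an unexpected PAK file, and why the conversion must be kept as-is for old worlds to stay readable. The coordinate values below are illustrative:

#include <cmath>
#include <cstdio>

int main(void)
{
	const int Coords[] = { -33, -32, -1, 0, 31, 32 };
	for (int i = 0; i < (int)(sizeof(Coords) / sizeof(Coords[0])); i++)
	{
		// floorf() rounds toward negative infinity:
		const int Layer = (int)(floorf((float)Coords[i] / 32.0f));
		// Plain integer division truncates toward zero instead:
		const int Truncated = Coords[i] / 32;
		printf("chunk %3d -> layer %2d (truncating division gives %2d)\n", Coords[i], Layer, Truncated);
	}
	return 0;
}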
@@ -207,10 +212,10 @@ cWSSCompact::cPAKFile::~cPAKFile()
-bool cWSSCompact::cPAKFile::LoadChunk(const cChunkPtr & a_Chunk)
+bool cWSSCompact::cPAKFile::LoadChunk(const cChunkCoords & a_Chunk, cWorld * a_World)
{
- int ChunkX = a_Chunk->GetPosX();
- int ChunkZ = a_Chunk->GetPosZ();
+ int ChunkX = a_Chunk.m_ChunkX;
+ int ChunkZ = a_Chunk.m_ChunkZ;
sChunkHeader * Header = NULL;
int Offset = 0;
for (sChunkHeaders::iterator itr = m_ChunkHeaders.begin(); itr != m_ChunkHeaders.end(); ++itr)
@@ -228,16 +233,16 @@ bool cWSSCompact::cPAKFile::LoadChunk(const cChunkPtr & a_Chunk)
return false;
}
- return LoadChunk(a_Chunk, Offset, Header);
+ return LoadChunk(a_Chunk, Offset, Header, a_World);
}
-bool cWSSCompact::cPAKFile::SaveChunk(const cChunkPtr & a_Chunk)
+bool cWSSCompact::cPAKFile::SaveChunk(const cChunkCoords & a_Chunk, cWorld * a_World)
{
- if (!SaveChunkToData(a_Chunk))
+ if (!SaveChunkToData(a_Chunk, a_World))
{
return false;
}
@@ -252,50 +257,63 @@ bool cWSSCompact::cPAKFile::SaveChunk(const cChunkPtr & a_Chunk)
-bool cWSSCompact::cPAKFile::LoadChunk(const cChunkPtr & a_Chunk, int a_Offset, sChunkHeader * a_Header)
+bool cWSSCompact::cPAKFile::LoadChunk(const cChunkCoords & a_Chunk, int a_Offset, sChunkHeader * a_Header, cWorld * a_World)
{
+ // Crude data integrity check:
+ if (a_Header->m_UncompressedSize < cChunk::c_BlockDataSize)
+ {
+ LOGWARNING("Chunk [%d, %d] has too short decompressed data (%d out of %d needed), erasing",
+ a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ,
+ a_Header->m_UncompressedSize, cChunk::c_BlockDataSize
+ );
+ EraseChunk(a_Chunk);
+ m_NumDirty++;
+ return false;
+ }
+
// Decompress the data:
- uLongf DestSize = a_Header->m_UncompressedSize;
- std::auto_ptr<char> BlockData(new char[ DestSize ]);
- int errorcode = uncompress( (Bytef*)BlockData.get(), &DestSize, (Bytef*)m_DataContents.data() + a_Offset, a_Header->m_CompressedSize );
+ AString UncompressedData;
+ int errorcode = UncompressString(m_DataContents.data() + a_Offset, a_Header->m_CompressedSize, UncompressedData, a_Header->m_UncompressedSize);
if (errorcode != Z_OK)
{
LOGERROR("Error %d decompressing data for chunk [%d, %d] from file \"%s\"",
errorcode,
- a_Chunk->GetPosX(), a_Chunk->GetPosZ(),
+ a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ,
m_FileName.c_str()
);
return false;
}
- if (a_Header->m_UncompressedSize != DestSize)
+ if (a_Header->m_UncompressedSize != (int)UncompressedData.size())
{
LOGWARNING("Uncompressed data size differs (exp %d, got %d) for chunk [%d, %d] from file \"%s\"",
- a_Header->m_UncompressedSize, DestSize,
- a_Chunk->GetPosX(), a_Chunk->GetPosZ(),
+ a_Header->m_UncompressedSize, UncompressedData.size(),
+ a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ,
m_FileName.c_str()
);
return false;
}
- a_Chunk->CopyBlockDataFrom(BlockData.get());
- a_Chunk->SetValid();
-
- if (DestSize > cChunk::c_BlockDataSize ) // We gots some extra data :D
+ cEntityList Entities;
+ cBlockEntityList BlockEntities;
+
+ if (a_Header->m_UncompressedSize > cChunk::c_BlockDataSize ) // We gots some extra data :D
{
LOGINFO("Parsing trailing JSON");
Json::Value root; // will contain the root value after parsing.
Json::Reader reader;
- if ( !reader.parse( BlockData.get() + cChunk::c_BlockDataSize, root, false ) )
+ if ( !reader.parse( UncompressedData.data() + cChunk::c_BlockDataSize, root, false ) )
{
LOGERROR("Failed to parse trailing JSON!");
}
else
{
- a_Chunk->LoadFromJson( root );
+ LoadEntitiesFromJson(root, Entities, BlockEntities, a_World);
}
}
+ a_World->ChunkDataLoaded(a_Chunk.m_ChunkX, 0, a_Chunk.m_ChunkZ, UncompressedData.data(), Entities, BlockEntities);
+
return true;
}
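[Editor's note:] The UncompressString() helper, pulled in through the new StringCompression.h include, replaces the inline zlib call that previously lived in this hunk. Its implementation is not part of this diff; the following is a plausible sketch of such a wrapper around zlib's uncompress(), under the assumption that the caller supplies the expected uncompressed size, as the call site above does:

#include <string>
#include "zlib.h"

typedef std::string AString;  // The project's string alias

// Hypothetical shape of the helper; the real one lives elsewhere in the tree.
// Returns a zlib error code, Z_OK on success, as the call sites above expect.
int UncompressString(const char * a_Data, int a_Length, AString & a_Uncompressed, int a_UncompressedSize)
{
	a_Uncompressed.resize(a_UncompressedSize);
	uLongf DestSize = (uLongf)a_UncompressedSize;
	int res = uncompress((Bytef *)&a_Uncompressed[0], &DestSize, (const Bytef *)a_Data, (uLong)a_Length);
	a_Uncompressed.resize(DestSize);  // Actual decompressed size; the caller cross-checks it
	return res;
}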
@@ -303,11 +321,10 @@ bool cWSSCompact::cPAKFile::LoadChunk(const cChunkPtr & a_Chunk, int a_Offset, s
-void cWSSCompact::cPAKFile::EraseChunk(const cChunkPtr & a_Chunk)
+void cWSSCompact::cPAKFile::EraseChunk(const cChunkCoords & a_Chunk)
{
- int ChunkX = a_Chunk->GetPosX();
- int ChunkZ = a_Chunk->GetPosZ();
- sChunkHeader * Header = NULL;
+ int ChunkX = a_Chunk.m_ChunkX;
+ int ChunkZ = a_Chunk.m_ChunkZ;
int Offset = 0;
for (sChunkHeaders::iterator itr = m_ChunkHeaders.begin(); itr != m_ChunkHeaders.end(); ++itr)
{
@@ -326,47 +343,52 @@ void cWSSCompact::cPAKFile::EraseChunk(const cChunkPtr & a_Chunk)
-bool cWSSCompact::cPAKFile::SaveChunkToData(const cChunkPtr & a_Chunk)
+bool cWSSCompact::cPAKFile::SaveChunkToData(const cChunkCoords & a_Chunk, cWorld * a_World)
{
- // Erase any existing data for the chunk:
- EraseChunk(a_Chunk);
-
// Serialize the chunk:
+ cJsonChunkSerializer Serializer;
+ a_World->GetChunkData(a_Chunk.m_ChunkX, 0, a_Chunk.m_ChunkZ, &Serializer);
+ if (Serializer.GetBlockData().empty())
+ {
+ // Chunk not valid
+ return false;
+ }
+
AString Data;
- Data.assign(a_Chunk->pGetBlockData(), cChunk::c_BlockDataSize);
- Json::Value root;
- a_Chunk->SaveToJson( root );
- if (!root.empty())
+ std::swap(Serializer.GetBlockData(), Data);
+ if (Serializer.HasJsonData())
{
AString JsonData;
Json::StyledWriter writer;
- JsonData = writer.write( root );
+ JsonData = writer.write(Serializer.GetRoot());
Data.append(JsonData);
}
// Compress the data:
- uLongf CompressedSize = compressBound(Data.size());
- std::auto_ptr<char> Compressed(new char[CompressedSize]);
- int errorcode = compress2( (Bytef*)Compressed.get(), &CompressedSize, (const Bytef*)Data.data(), Data.size(), Z_DEFAULT_COMPRESSION);
+ AString CompressedData;
+ int errorcode = CompressString(Data.data(), Data.size(), CompressedData);
if ( errorcode != Z_OK )
{
- LOGERROR("Error %i compressing data for chunk [%d, %d]", errorcode, a_Chunk->GetPosX(), a_Chunk->GetPosZ() );
+ LOGERROR("Error %i compressing data for chunk [%d, %d]", errorcode, a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
return false;
}
+ // Erase any existing data for the chunk:
+ EraseChunk(a_Chunk);
+
// Save the header:
sChunkHeader * Header = new sChunkHeader;
if (Header == NULL)
{
return false;
}
- Header->m_CompressedSize = CompressedSize;
- Header->m_ChunkX = a_Chunk->GetPosX();
- Header->m_ChunkZ = a_Chunk->GetPosZ();
- Header->m_UncompressedSize = Data.size();
+ Header->m_CompressedSize = (int)CompressedData.size();
+ Header->m_ChunkX = a_Chunk.m_ChunkX;
+ Header->m_ChunkZ = a_Chunk.m_ChunkZ;
+ Header->m_UncompressedSize = (int)Data.size();
m_ChunkHeaders.push_back(Header);
- m_DataContents.append(Compressed.get(), CompressedSize);
+ m_DataContents.append(CompressedData.data(), CompressedData.size());
m_NumDirty++;
return true;
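[Editor's note:] Similarly, CompressString() hides the compressBound() / compress2() sequence that the removed lines spelled out inline. A plausible counterpart sketch, again hypothetical rather than the actual implementation shipped with StringCompression.h:

// Returns a zlib error code, Z_OK on success.
int CompressString(const char * a_Data, int a_Length, AString & a_Compressed)
{
	uLongf CompressedSize = compressBound((uLong)a_Length);
	a_Compressed.resize(CompressedSize);
	int res = compress2((Bytef *)&a_Compressed[0], &CompressedSize, (const Bytef *)a_Data, (uLong)a_Length, Z_DEFAULT_COMPRESSION);
	a_Compressed.resize(CompressedSize);  // Shrink to the size compress2() actually produced
	return res;
}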
@@ -402,7 +424,7 @@ void cWSSCompact::cPAKFile::SynchronizeFile(void)
{
WRITE(**itr);
}
- if (f.Write(m_DataContents.data(), m_DataContents.size()) != m_DataContents.size())
+ if (f.Write(m_DataContents.data(), m_DataContents.size()) != (int)m_DataContents.size())
{
LOGERROR("cWSSCompact: ERROR writing chunk contents to file \"%s\" (line %d); file offset %d", m_FileName.c_str(), __LINE__, f.Tell());
return;
@@ -413,3 +435,70 @@ void cWSSCompact::cPAKFile::SynchronizeFile(void)
+
+void cWSSCompact::cPAKFile::LoadEntitiesFromJson(Json::Value & a_Value, cEntityList & a_Entities, cBlockEntityList & a_BlockEntities, cWorld * a_World)
+{
+ // Load chests
+ Json::Value AllChests = a_Value.get("Chests", Json::nullValue);
+ if (!AllChests.empty())
+ {
+ for (Json::Value::iterator itr = AllChests.begin(); itr != AllChests.end(); ++itr )
+ {
+ Json::Value & Chest = *itr;
+ cChestEntity * ChestEntity = new cChestEntity(0,0,0, a_World);
+ if (!ChestEntity->LoadFromJson( Chest ) )
+ {
+ LOGERROR("ERROR READING CHEST FROM JSON!" );
+ delete ChestEntity;
+ }
+ else
+ {
+ a_BlockEntities.push_back( ChestEntity );
+ }
+ } // for itr - AllChests[]
+ }
+
+ // Load furnaces
+ Json::Value AllFurnaces = a_Value.get("Furnaces", Json::nullValue);
+ if( !AllFurnaces.empty() )
+ {
+ for( Json::Value::iterator itr = AllFurnaces.begin(); itr != AllFurnaces.end(); ++itr )
+ {
+ Json::Value & Furnace = *itr;
+ cFurnaceEntity * FurnaceEntity = new cFurnaceEntity(0,0,0, a_World);
+ if( !FurnaceEntity->LoadFromJson( Furnace ) )
+ {
+ LOGERROR("ERROR READING FURNACE FROM JSON!" );
+ delete FurnaceEntity;
+ }
+ else
+ {
+ a_BlockEntities.push_back( FurnaceEntity );
+ }
+ } // for itr - AllFurnaces[]
+ }
+
+ // Load signs
+ Json::Value AllSigns = a_Value.get("Signs", Json::nullValue);
+ if( !AllSigns.empty() )
+ {
+ for( Json::Value::iterator itr = AllSigns.begin(); itr != AllSigns.end(); ++itr )
+ {
+ Json::Value & Sign = *itr;
+ cSignEntity * SignEntity = new cSignEntity( E_BLOCK_SIGN_POST, 0,0,0, a_World);
+ if ( !SignEntity->LoadFromJson( Sign ) )
+ {
+ LOGERROR("ERROR READING SIGN FROM JSON!" );
+ delete SignEntity;
+ }
+ else
+ {
+ a_BlockEntities.push_back( SignEntity );
+ }
+ } // for itr - AllSigns[]
+ }
+}
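[Editor's note:] For reference, the trailing JSON that LoadEntitiesFromJson() walks is a single object with optional "Chests", "Furnaces" and "Signs" arrays. The fields inside each entity object are chosen by the respective LoadFromJson() / SaveToJson() implementations; the "x"/"y"/"z" names below are placeholders. A tiny round-trip sketch of that shape:

#include <string>
#include <json/json.h>

int main(void)
{
	// Build a document of the shape the loader expects; the field names are
	// hypothetical, standing in for whatever cChestEntity actually writes:
	Json::Value root;
	Json::Value Chest;
	Chest["x"] = 10;
	Chest["y"] = 64;
	Chest["z"] = -3;
	root["Chests"].append(Chest);

	Json::StyledWriter writer;
	std::string Doc = writer.write(root);  // What gets appended after the block data

	// Parse it back the same way the loader does:
	Json::Value Parsed;
	Json::Reader reader;
	reader.parse(Doc, Parsed, false);
	Json::Value AllChests = Parsed.get("Chests", Json::nullValue);
	return AllChests.empty() ? 1 : 0;
}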
+
+
+
+