databaseVersion.addFileContent(fileContent); fileContent = null;
/**
 * Merges the metadata of all DIRTY {@link DatabaseVersion}s into the given new database
 * version. DIRTY means the version is not part of the winning branch; its chunks,
 * multichunks and file contents are re-added so they are included in the next up
 * operation. Only metadata is re-uploaded &mdash; the actual multichunk payloads already
 * exist in the repository.
 *
 * @param newDatabaseVersion {@link DatabaseVersion} to which dirty data should be added.
 */
private void addDirtyData(DatabaseVersion newDatabaseVersion) {
	Iterator<DatabaseVersion> dirtyDatabaseVersionIterator = localDatabase.getDirtyDatabaseVersions();

	// Nothing dirty in the local database -> nothing to merge
	if (!dirtyDatabaseVersionIterator.hasNext()) {
		logger.log(Level.INFO, "No DIRTY data found in database (no dirty databases); Nothing to do here.");
		return;
	}

	logger.log(Level.INFO, "Adding DIRTY data to new database version: ");

	while (dirtyDatabaseVersionIterator.hasNext()) {
		DatabaseVersion dirtyVersion = dirtyDatabaseVersionIterator.next();

		logger.log(Level.INFO, "- Adding chunks/multichunks/filecontents from database version " + dirtyVersion.getHeader());

		for (ChunkEntry chunk : dirtyVersion.getChunks()) {
			newDatabaseVersion.addChunk(chunk);
		}

		for (MultiChunkEntry multiChunk : dirtyVersion.getMultiChunks()) {
			newDatabaseVersion.addMultiChunk(multiChunk);
		}

		for (FileContent content : dirtyVersion.getFileContents()) {
			newDatabaseVersion.addFileContent(content);
		}
	}
}
protected DatabaseVersion createDatabaseVersionFromRow(ResultSet resultSet, boolean excludeChunkData, int fileHistoryMaxCount) throws SQLException { DatabaseVersionHeader databaseVersionHeader = createDatabaseVersionHeaderFromRow(resultSet); DatabaseVersion databaseVersion = new DatabaseVersion(); databaseVersion.setHeader(databaseVersionHeader); // Add chunk/multichunk/filecontent data if (!excludeChunkData) { Map<ChunkChecksum, ChunkEntry> chunks = chunkDao.getChunks(databaseVersionHeader.getVectorClock()); Map<MultiChunkId, MultiChunkEntry> multiChunks = multiChunkDao.getMultiChunks(databaseVersionHeader.getVectorClock()); Map<FileChecksum, FileContent> fileContents = fileContentDao.getFileContents(databaseVersionHeader.getVectorClock()); for (ChunkEntry chunk : chunks.values()) { databaseVersion.addChunk(chunk); } for (MultiChunkEntry multiChunk : multiChunks.values()) { databaseVersion.addMultiChunk(multiChunk); } for (FileContent fileContent : fileContents.values()) { databaseVersion.addFileContent(fileContent); } } // Add file histories Map<FileHistoryId, PartialFileHistory> fileHistories = fileHistoryDao .getFileHistoriesWithFileVersions(databaseVersionHeader.getVectorClock(), fileHistoryMaxCount); for (PartialFileHistory fileHistory : fileHistories.values()) { databaseVersion.addFileHistory(fileHistory); } return databaseVersion; }
newDatabaseVersion.addFileContent(fileContent);
/**
 * Returns a copy of this database version carrying the same header and the same
 * chunk/multichunk/filecontent/filehistory entries as this instance.
 *
 * <p>Note: this is a copy-style clone &mdash; it does not call {@code super.clone()},
 * and the header and contained entries are the same object references as in the
 * original (no deep copy of the entries themselves).
 */
@Override
public DatabaseVersion clone() {
	DatabaseVersion copy = new DatabaseVersion();
	copy.setHeader(getHeader());

	for (ChunkEntry chunk : getChunks()) {
		copy.addChunk(chunk);
	}

	for (MultiChunkEntry multiChunk : getMultiChunks()) {
		copy.addMultiChunk(multiChunk);
	}

	for (FileContent fileContent : getFileContents()) {
		copy.addFileContent(fileContent);
	}

	for (PartialFileHistory fileHistory : getFileHistories()) {
		copy.addFileHistory(fileHistory);
	}

	return copy;
}
fullDatabaseVersionCache.addFileContent(sourceFileContent);