static NodeEnvironment.NodePath getPathWithMostFreeSpace(NodeEnvironment env) throws IOException {
    final NodeEnvironment.NodePath[] paths = env.nodePaths();
    NodeEnvironment.NodePath bestPath = null;
    long maxUsableBytes = Long.MIN_VALUE;
    for (NodeEnvironment.NodePath nodePath : paths) {
        FileStore fileStore = nodePath.fileStore;
        long usableBytes = fileStore.getUsableSpace();
        assert usableBytes >= 0 : "usable bytes must be >= 0, got: " + usableBytes;

        if (bestPath == null || usableBytes > maxUsableBytes) {
            // This path has been determined to be "better" based on the usable bytes
            maxUsableBytes = usableBytes;
            bestPath = nodePath;
        }
    }
    return bestPath;
}
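The same selection idea can be reproduced outside the NodeEnvironment API; a minimal standalone sketch using only java.nio follows, where the data directories are hypothetical stand-ins for NodePath.fileStore:

import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class MostFreeSpaceSketch {
    // Pick the path whose backing FileStore currently reports the most usable bytes.
    static Path pathWithMostFreeSpace(List<Path> dataPaths) throws IOException {
        Path best = null;
        long maxUsable = Long.MIN_VALUE;
        for (Path p : dataPaths) {
            FileStore store = Files.getFileStore(p);  // filesystem backing this path
            long usable = store.getUsableSpace();     // bytes available to this JVM on that filesystem
            if (best == null || usable > maxUsable) {
                maxUsable = usable;
                best = p;
            }
        }
        return best;
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical data directories; replace with real, existing locations when trying this out.
        List<Path> paths = List.of(Paths.get("/data/a"), Paths.get("/data/b"));
        System.out.println("most free space: " + pathWithMostFreeSpace(paths));
    }
}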
/**
 * This method tries to write an empty file and moves it using an atomic move operation.
 * This method throws an {@link IllegalStateException} if this operation is
 * not supported by the filesystem. This test is executed on each of the data directories.
 * This method cleans up all files even in the case of an error.
 */
public void ensureAtomicMoveSupported() throws IOException {
    final NodePath[] nodePaths = nodePaths();
    for (NodePath nodePath : nodePaths) {
        assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
        final Path src = nodePath.path.resolve(TEMP_FILE_NAME + ".tmp");
        final Path target = nodePath.path.resolve(TEMP_FILE_NAME + ".final");
        try {
            Files.deleteIfExists(src);
            Files.createFile(src);
            Files.move(src, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
        } catch (AtomicMoveNotSupportedException ex) {
            throw new IllegalStateException("atomic_move is not supported by the filesystem on path ["
                    + nodePath.path
                    + "] atomic_move is required for elasticsearch to work correctly.", ex);
        } finally {
            try {
                Files.deleteIfExists(src);
            } finally {
                Files.deleteIfExists(target);
            }
        }
    }
}
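The same probe can be run in isolation against any writable directory; a minimal sketch (probe file names are arbitrary choices, not the names used above) looks like this:

import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicMoveProbe {
    // Returns true if the filesystem backing 'dir' supports atomic renames.
    static boolean supportsAtomicMove(Path dir) throws IOException {
        Path src = dir.resolve("probe.tmp");
        Path target = dir.resolve("probe.final");
        try {
            Files.deleteIfExists(src);
            Files.createFile(src);
            Files.move(src, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
            return true;
        } catch (AtomicMoveNotSupportedException ex) {
            return false;
        } finally {
            // Clean up both files even if the move failed part-way.
            try {
                Files.deleteIfExists(src);
            } finally {
                Files.deleteIfExists(target);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("atomic-move-probe");
        System.out.println("atomic move supported: " + supportsAtomicMove(dir));
    }
}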
/**
 * Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of the
 * returned paths. The returned array may contain paths to non-existing directories.
 *
 * @see IndexSettings#hasCustomDataPath()
 * @see #resolveCustomLocation(IndexSettings, ShardId)
 */
public Path[] availableShardPaths(ShardId shardId) {
    assertEnvIsLocked();
    final NodePath[] nodePaths = nodePaths();
    final Path[] shardLocations = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
        shardLocations[i] = nodePaths[i].resolve(shardId);
    }
    return shardLocations;
}
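A simplified analog of resolving one candidate directory per data path is sketched below; the indices/&lt;index&gt;/&lt;shardId&gt; layout used here is an illustrative assumption, not necessarily the exact on-disk layout produced by NodePath.resolve(shardId):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class ShardPathsSketch {
    // One candidate directory per data path for a given shard; directories need not exist yet.
    static Path[] candidateShardPaths(List<Path> dataPaths, String index, int shardId) {
        Path[] locations = new Path[dataPaths.size()];
        for (int i = 0; i < dataPaths.size(); i++) {
            locations[i] = dataPaths.get(i).resolve("indices").resolve(index).resolve(Integer.toString(shardId));
        }
        return locations;
    }

    public static void main(String[] args) {
        List<Path> dataPaths = List.of(Paths.get("/data/a"), Paths.get("/data/b"));
        for (Path p : candidateShardPaths(dataPaths, "my-index", 0)) {
            System.out.println(p);
        }
    }
}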
public FsInfo stats(FsInfo previous, @Nullable ClusterInfo clusterInfo) throws IOException {
    if (!nodeEnv.hasNodeFile()) {
        return new FsInfo(System.currentTimeMillis(), null, new FsInfo.Path[0]);
    }
    NodePath[] dataLocations = nodeEnv.nodePaths();
    FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
    for (int i = 0; i < dataLocations.length; i++) {
        paths[i] = getFSInfo(dataLocations[i]);
    }
    FsInfo.IoStats ioStats = null;
    if (Constants.LINUX) {
        Set<Tuple<Integer, Integer>> devicesNumbers = new HashSet<>();
        for (int i = 0; i < dataLocations.length; i++) {
            if (dataLocations[i].majorDeviceNumber != -1 && dataLocations[i].minorDeviceNumber != -1) {
                devicesNumbers.add(Tuple.tuple(dataLocations[i].majorDeviceNumber, dataLocations[i].minorDeviceNumber));
            }
        }
        ioStats = ioStats(devicesNumbers, previous);
    }
    DiskUsage leastDiskEstimate = null;
    DiskUsage mostDiskEstimate = null;
    if (clusterInfo != null) {
        leastDiskEstimate = clusterInfo.getNodeLeastAvailableDiskUsages().get(nodeEnv.nodeId());
        mostDiskEstimate = clusterInfo.getNodeMostAvailableDiskUsages().get(nodeEnv.nodeId());
    }
    return new FsInfo(System.currentTimeMillis(), ioStats, paths, leastDiskEstimate, mostDiskEstimate);
}
private static boolean isDefaultPathDataInPathData(final NodeEnvironment nodeEnv, final Path defaultNodeDirectory) throws IOException {
    for (final NodeEnvironment.NodePath dataPath : nodeEnv.nodePaths()) {
        if (Files.isSameFile(dataPath.path, defaultNodeDirectory)) {
            return true;
        }
    }
    return false;
}
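A self-contained analog of the same check, using temporary directories created only for the demonstration, is sketched here:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class SamePathSketch {
    // True if 'candidate' resolves to the same on-disk directory as any configured data path.
    // Files.isSameFile resolves symlinks, unlike a plain Path.equals comparison.
    static boolean isAmongDataPaths(List<Path> dataPaths, Path candidate) throws IOException {
        for (Path dataPath : dataPaths) {
            if (Files.isSameFile(dataPath, candidate)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) throws IOException {
        Path a = Files.createTempDirectory("data-a");
        Path b = Files.createTempDirectory("data-b");
        System.out.println(isAmongDataPaths(List.of(a, b), a)); // true
        System.out.println(isAmongDataPaths(List.of(a), b));    // false
    }
}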
/**
 * This method tries to write an empty file and moves it using an atomic move operation.
 * This method throws an {@link IllegalStateException} if this operation is
 * not supported by the filesystem. This test is executed on each of the data directories.
 * This method cleans up all files even in the case of an error.
 */
public void ensureAtomicMoveSupported() throws IOException {
    final NodePath[] nodePaths = nodePaths();
    for (NodePath nodePath : nodePaths) {
        assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
        final Path src = nodePath.path.resolve("__es__.tmp");
        Files.createFile(src);
        final Path target = nodePath.path.resolve("__es__.final");
        try {
            Files.move(src, target, StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException ex) {
            throw new IllegalStateException("atomic_move is not supported by the filesystem on path ["
                    + nodePath.path
                    + "] atomic_move is required for elasticsearch to work correctly.", ex);
        } finally {
            Files.deleteIfExists(src);
            Files.deleteIfExists(target);
        }
    }
}
/**
 * This method tries to write an empty file and moves it using an atomic move operation.
 * This method throws an {@link IllegalStateException} if this operation is
 * not supported by the filesystem. This test is executed on each of the data directories.
 * This method cleans up all files even in the case of an error.
 */
public void ensureAtomicMoveSupported() throws IOException {
    final NodePath[] nodePaths = nodePaths();
    for (NodePath nodePath : nodePaths) {
        assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
        final Path src = nodePath.path.resolve("__es__.tmp");
        final Path target = nodePath.path.resolve("__es__.final");
        try {
            Files.createFile(src);
            Files.move(src, target, StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException ex) {
            throw new IllegalStateException("atomic_move is not supported by the filesystem on path ["
                    + nodePath.path
                    + "] atomic_move is required for elasticsearch to work correctly.", ex);
        } finally {
            try {
                Files.deleteIfExists(src);
            } finally {
                Files.deleteIfExists(target);
            }
        }
    }
}
public FsInfo stats() throws IOException {
    if (!nodeEnv.hasNodeFile()) {
        return new FsInfo(System.currentTimeMillis(), new FsInfo.Path[0]);
    }
    NodePath[] dataLocations = nodeEnv.nodePaths();
    FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
    for (int i = 0; i < dataLocations.length; i++) {
        paths[i] = getFSInfo(dataLocations[i]);
    }
    return new FsInfo(System.currentTimeMillis(), paths);
}
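The underlying per-path numbers come from the backing FileStore; a minimal sketch of collecting them directly with java.nio (independent of the FsInfo/getFSInfo types above) is:

import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;

public class FsStatsSketch {
    // Print total, free, and usable bytes for the FileStore backing each data path.
    static void printFsStats(Path... dataPaths) throws IOException {
        for (Path p : dataPaths) {
            FileStore store = Files.getFileStore(p);
            System.out.printf("%s: total=%d free=%d available=%d%n",
                    p, store.getTotalSpace(), store.getUnallocatedSpace(), store.getUsableSpace());
        }
    }

    public static void main(String[] args) throws IOException {
        printFsStats(Files.createTempDirectory("fs-stats-sketch"));
    }
}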
/**
 * Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of the
 * returned paths. The returned array may contain paths to non-existing directories.
 *
 * @see #hasCustomDataPath(org.elasticsearch.common.settings.Settings)
 * @see #resolveCustomLocation(org.elasticsearch.common.settings.Settings, org.elasticsearch.index.shard.ShardId)
 */
public Path[] availableShardPaths(ShardId shardId) {
    assert assertEnvIsLocked();
    final NodePath[] nodePaths = nodePaths();
    final Path[] shardLocations = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
        shardLocations[i] = nodePaths[i].resolve(shardId);
    }
    return shardLocations;
}
/** Maps each path.data path to a "guess" of how many bytes the shards allocated to that path might additionally use over their
 *  lifetime; we do this so a bunch of newly allocated shards won't just all go to the path with the most free space at this moment. */
private static Map<Path, Long> getEstimatedReservedBytes(NodeEnvironment env, long avgShardSizeInBytes,
                                                         Iterable<IndexShard> shards) throws IOException {
    long totFreeSpace = 0;
    for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
        totFreeSpace += nodePath.fileStore.getUsableSpace();
    }

    // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
    // shard size across the cluster and 5% of the total available free space on this node:
    long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace / 20.0));

    // Collate predicted (guessed!) disk usage on each path.data:
    Map<Path, Long> reservedBytes = new HashMap<>();
    for (IndexShard shard : shards) {
        // Remove indices/<index>/<shardID> subdirs from the statePath to get back to the path.data/<lockID>:
        Path dataPath = NodeEnvironment.shardStatePathToDataPath(shard.shardPath().getShardStatePath());
        Long curBytes = reservedBytes.get(dataPath);
        if (curBytes == null) {
            curBytes = 0L;
        }
        reservedBytes.put(dataPath, curBytes + estShardSizeInBytes);
    }

    return reservedBytes;
}
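The reservation heuristic itself is plain arithmetic; a standalone sketch, with a hypothetical list of per-shard data paths standing in for the NodeEnvironment lookups, might look like:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReservedBytesSketch {
    // Each shard reserves max(average shard size, 5% of the node's total free space);
    // sum those reservations per data path.
    static Map<Path, Long> estimatedReservedBytes(long avgShardSizeInBytes, long totalFreeBytes, List<Path> shardDataPaths) {
        long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totalFreeBytes / 20.0));
        Map<Path, Long> reserved = new HashMap<>();
        for (Path dataPath : shardDataPaths) {
            reserved.merge(dataPath, estShardSizeInBytes, Long::sum);
        }
        return reserved;
    }

    public static void main(String[] args) {
        // Two shards on /data/a, one on /data/b; 512 MiB average shard size, 100 GiB free on the node.
        List<Path> shardPaths = List.of(Paths.get("/data/a"), Paths.get("/data/a"), Paths.get("/data/b"));
        System.out.println(estimatedReservedBytes(512L << 20, 100L << 30, shardPaths));
    }
}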
    statePath = env.nodePaths()[0].resolve(shardId);
} else {
    BigInteger totFreeSpace = BigInteger.ZERO;
    for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
        totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodePath.fileStore.getUsableSpace()));
    }

    final NodeEnvironment.NodePath[] paths = env.nodePaths();
    NodeEnvironment.NodePath bestPath = null;
    BigInteger maxUsableBytes = BigInteger.valueOf(Long.MIN_VALUE);
public FsInfo stats(FsInfo previous) throws IOException {
    if (!nodeEnv.hasNodeFile()) {
        return new FsInfo(System.currentTimeMillis(), null, new FsInfo.Path[0]);
    }
    NodePath[] dataLocations = nodeEnv.nodePaths();
    FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
    for (int i = 0; i < dataLocations.length; i++) {
        paths[i] = getFSInfo(dataLocations[i]);
    }
    FsInfo.IoStats ioStats = null;
    if (Constants.LINUX) {
        Set<Tuple<Integer, Integer>> devicesNumbers = new HashSet<>();
        for (int i = 0; i < dataLocations.length; i++) {
            if (dataLocations[i].majorDeviceNumber != -1 && dataLocations[i].minorDeviceNumber != -1) {
                devicesNumbers.add(Tuple.tuple(dataLocations[i].majorDeviceNumber, dataLocations[i].minorDeviceNumber));
            }
        }
        ioStats = ioStats(devicesNumbers, previous);
    }
    return new FsInfo(System.currentTimeMillis(), ioStats, paths);
}
for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) {
    final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName);
    final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, indexFolderPath);