/**
 * Validates that {@code indexFile} is small enough to be addressed with an int offset.
 *
 * @param indexFile the segment index file to check
 * @throws IOE if the file's length exceeds {@link Integer#MAX_VALUE}
 */
public static void checkFileSize(File indexFile) throws IOException
{
  final long length = indexFile.length();
  if (length > Integer.MAX_VALUE) {
    throw new IOE("File[%s] too large[%d]", indexFile, length);
  }
}
/**
 * Reads the segment format version from a segment directory. Prefers the
 * 4-byte big-endian int in "version.bin"; falls back to the first byte of
 * "index.drd" for older segments.
 *
 * @param inDir the segment directory
 * @return the segment version
 * @throws IOE if neither version.bin nor index.drd exists in {@code inDir}
 */
public static int getVersionFromDir(File inDir) throws IOException
{
  final File versionFile = new File(inDir, "version.bin");
  if (versionFile.exists()) {
    return Ints.fromByteArray(Files.toByteArray(versionFile));
  }

  final File indexFile = new File(inDir, "index.drd");
  if (indexFile.exists()) {
    // Legacy layout: the version is the first byte of index.drd.
    try (InputStream in = new FileInputStream(indexFile)) {
      return in.read();
    }
  }

  throw new IOE("Invalid segment dir [%s]. Can't find either of version.bin or index.drd.", inDir);
}
}
/**
 * Moves {@code from} to {@code to}, tolerating the case where a replica task
 * has already pushed the segment to the destination.
 *
 * @throws IOE if the rename fails and nothing exists at {@code to}
 */
private void copyFilesWithChecks(final FileSystem fs, final Path from, final Path to) throws IOException
{
  if (HadoopFsWrapper.rename(fs, from, to)) {
    return;
  }
  // Rename failed: benign if a replica already placed the segment, fatal otherwise.
  if (fs.exists(to)) {
    log.info(
        "Unable to rename temp file [%s] to segment path [%s], it may have already been pushed by a replica task.",
        from,
        to
    );
  } else {
    throw new IOE("Failed to rename temp file [%s] and final segment path [%s] is not present.", from, to);
  }
}
/**
 * Returns the currently known leader host, re-picking a host when the cached
 * value is absent or {@code cached} is false.
 *
 * @param cached whether a previously cached leader may be reused
 * @throws IOE if no server is known
 */
private String getCurrentKnownLeader(final boolean cached) throws IOException
{
  final String leader = currentKnownLeader.accumulateAndGet(
      null,
      (current, given) -> {
        // Reuse the cached leader only when one exists and caching is allowed.
        if (current != null && cached) {
          return current;
        }
        return pickOneHost();
      }
  );

  if (leader == null) {
    throw new IOE("No known server");
  }
  return leader;
}
/**
 * Opens an input stream over the object identified by {@code uri}.
 *
 * @param uri location of the object to read
 * @throws IOE wrapping any {@link AmazonServiceException} raised while opening
 */
@Override
public InputStream getInputStream(URI uri) throws IOException
{
  try {
    return buildFileObject(uri).openInputStream();
  }
  catch (AmazonServiceException e) {
    // Translate the service failure into the project's IOException subtype.
    throw new IOE(e, "Could not load URI [%s]", uri);
  }
}
// NOTE(review): fragment — enclosing method not visible; rejects a configured
// taskLogDir path that exists but is not a directory.
throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
/**
 * Writes the segment descriptor JSON to {@code descriptorPath}, first deleting
 * any pre-existing descriptor. Exceptions are logged and rethrown so the
 * surrounding retry loop can handle them. Always returns -1 (no byte count).
 */
@Override
public long push() throws IOException
{
  try {
    progressable.progress();
    // Remove a stale descriptor before recreating it.
    if (outputFS.exists(descriptorPath) && !outputFS.delete(descriptorPath, false)) {
      throw new IOE("Failed to delete descriptor at [%s]", descriptorPath);
    }
    try (final OutputStream descriptorOut = outputFS.create(
        descriptorPath,
        true,
        DEFAULT_FS_BUFFER_SIZE,
        progressable
    )) {
      HadoopDruidIndexerConfig.JSON_MAPPER.writeValue(descriptorOut, segment);
    }
  }
  catch (RuntimeException | IOException ex) {
    log.info(ex, "Exception in descriptor pusher retry loop");
    throw ex;
  }
  return -1;
}
},
/**
 * Copies {@code file} into the configured task log directory under a
 * task-specific name, creating the directory if necessary.
 *
 * @throws IOE if the log directory does not exist and cannot be created
 */
@Override
public void pushTaskLog(final String taskid, File file) throws IOException
{
  final File logDir = config.getDirectory();
  if (!logDir.exists() && !logDir.mkdirs()) {
    throw new IOE("Unable to create task log dir[%s]", logDir);
  }
  final File outputFile = fileForTask(taskid, file.getName());
  Files.copy(file, outputFile);
  log.info("Wrote task log to: %s", outputFile);
}
/**
 * Copies {@code reportFile} into the configured task report directory under a
 * task-specific name, creating the directory if necessary.
 *
 * @throws IOE if the report directory does not exist and cannot be created
 */
@Override
public void pushTaskReports(String taskid, File reportFile) throws IOException
{
  final File reportDir = config.getDirectory();
  if (!reportDir.exists() && !reportDir.mkdirs()) {
    throw new IOE("Unable to create task report dir[%s]", reportDir);
  }
  final File outputFile = fileForTask(taskid, reportFile.getName());
  Files.copy(reportFile, outputFile);
  log.info("Wrote task report to: %s", outputFile);
}
// NOTE(review): fragment — wraps the underlying failure so callers see which
// task's logs could not be streamed; original cause is preserved.
throw new IOE(e, "Failed to stream logs from: %s", taskKey);
/**
 * Deletes every entry under the configured HDFS task log directory whose
 * modification time is older than {@code timestamp}. Checks the thread's
 * interrupt flag between deletions and aborts with an IOException wrapping an
 * InterruptedException if set.
 *
 * @throws IOE if the configured path exists but is not a directory
 */
@Override
public void killOlderThan(long timestamp) throws IOException
{
  final Path taskLogDir = new Path(config.getDirectory());
  final FileSystem fs = taskLogDir.getFileSystem(hadoopConfig);
  if (!fs.exists(taskLogDir)) {
    return;
  }
  if (!fs.isDirectory(taskLogDir)) {
    throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
  }

  final RemoteIterator<LocatedFileStatus> files = fs.listLocatedStatus(taskLogDir);
  while (files.hasNext()) {
    final LocatedFileStatus status = files.next();
    if (status.getModificationTime() < timestamp) {
      final Path logPath = status.getPath();
      log.info("Deleting hdfs task log [%s].", logPath.toUri().toString());
      fs.delete(logPath, true);
    }
    // Bail out promptly if interrupted; surfaced as an IOException per the contract.
    if (Thread.currentThread().isInterrupted()) {
      throw new IOException(
          new InterruptedException("Thread interrupted. Couldn't delete all tasklogs.")
      );
    }
  }
}
}
// NOTE(review): fragment — fs.rename's boolean result is ignored; the exists()
// check below is the only failure detection. Consider checking the return
// value directly — TODO confirm intent with the enclosing method.
fs.rename(intermediateHdfsPath, hdfsPath); if (!fs.exists(hdfsPath)) { throw new IOE("File does not exist even after moving from[%s] to [%s]", intermediateHdfsPath, hdfsPath);
// NOTE(review): fragment — if-branch records the single object summary; the
// else-branch fails when a directory placeholder's contents cannot be listed,
// presumably due to missing list permission — confirm against the catch site.
objects.add(S3Utils.getSingleObjectSummary(s3Client, bucket, prefix)); } else { throw new IOE( "[%s] is a directory placeholder, " + "but failed to get the object list under the directory due to permission",
// Finalizes this writer: marks it closed, records the file's metadata in the
// smoosher's index, releases the writer, then validates that exactly `size`
// bytes were written before merging the temp file into the main smoosh file.
// NOTE(review): metadata is recorded and the writer released BEFORE the byte
// count is validated — if either check throws, that state is already mutated.
// The ISE fires when bytes written disagree with the output's offset delta
// (possible concurrent modification); the IOE fires when the total differs
// from the declared size (possible corruption).
@Override public void close() throws IOException { open = false; internalFiles.put(name, new Metadata(currOut.getFileNum(), startOffset, currOut.getCurrOffset())); writerCurrentlyInUse = false; if (bytesWritten != currOut.getCurrOffset() - startOffset) { throw new ISE("WTF? Perhaps there is some concurrent modification going on?"); } if (bytesWritten != size) { throw new IOE("Expected [%,d] bytes, only saw [%,d], potential corruption?", size, bytesWritten); } // Merge temporary files on to the main smoosh file. mergeWithSmoosher(); } };
// NOTE(review): fragment — a missing log yields Optional.absent(); any other
// failure is rethrown as an IOE carrying the task key and the original cause.
return Optional.absent(); } else { throw new IOE(e, "Failed to stream logs from: %s", taskKey);
// NOTE(review): fragment — wraps the underlying failure so callers see which
// task's logs could not be streamed; original cause is preserved.
throw new IOE(e, "Failed to stream logs from: %s", taskKey);