@Override public byte[] mutate(byte[] current) throws Exception { final String currentName = new String(current); if (currentName.equals(newName)) return null; // assume in this case the operation is running again, so we are done if (!currentName.equals(oldName)) { throw new AcceptableThriftTableOperationException(null, oldName, TableOperation.RENAME, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Name changed while processing"); } return newName.getBytes(); } });
/**
 * Atomically assigns the next table/namespace id by incrementing the base-36
 * counter stored at the tables node in ZooKeeper.
 *
 * @param name name of the table/namespace being created (used in error reporting)
 * @param context server context providing the ZooKeeper reader/writer
 * @param newIdFunction converts the raw id string into the typed id
 * @return the newly assigned id
 * @throws AcceptableThriftTableOperationException if the id could not be assigned
 */
public static <T extends AbstractId> T getNextId(String name, ServerContext context,
    Function<String,T> newIdFunction) throws AcceptableThriftTableOperationException {
  try {
    final IZooReaderWriter zoo = context.getZooReaderWriter();
    final String counterPath = context.getZooKeeperRoot() + Constants.ZTABLES;
    // The node's value is the last-assigned id encoded in radix 36; bump it by one.
    final byte[] assigned = zoo.mutate(counterPath, ZERO_BYTE, ZooUtil.PUBLIC, currentValue -> {
      final BigInteger next =
          new BigInteger(new String(currentValue, UTF_8), Character.MAX_RADIX)
              .add(BigInteger.ONE);
      return next.toString(Character.MAX_RADIX).getBytes(UTF_8);
    });
    return newIdFunction.apply(new String(assigned, UTF_8));
  } catch (Exception e1) {
    log.error("Failed to assign id to " + name, e1);
    throw new AcceptableThriftTableOperationException(null, name, TableOperation.CREATE,
        TableOperationExceptionType.OTHER, e1.getMessage());
  }
}
/**
 * Verifies no <em>other</em> table already uses {@code tableName}. A mapping that
 * points back at {@code tableId} itself is not a conflict (the operation may be
 * re-running).
 *
 * @throws AcceptableThriftTableOperationException if a different table owns the name
 */
public static void checkTableDoesNotExist(ServerContext context, String tableName,
    Table.ID tableId, TableOperation operation) throws AcceptableThriftTableOperationException {
  final Table.ID existing = Tables.getNameToIdMap(context).get(tableName);
  if (existing == null || existing.equals(tableId)) {
    return;
  }
  throw new AcceptableThriftTableOperationException(null, tableName, operation,
      TableOperationExceptionType.EXISTS, null);
}
throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Failed to read export metadata " + ioe.getMessage()); throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion); throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
// Atomically bumps the compaction id stored in ZooKeeper and registers this
// transaction's compaction config. The node value has the form
//   "<id>[,<txid>=<hex-encoded-config>]*"
// NOTE(review): format inferred from the parsing/appending below — confirm
// against the reader of this node.
@Override
public byte[] mutate(byte[] currentValue) throws Exception {
  String cvs = new String(currentValue, UTF_8);
  String[] tokens = cvs.split(",");
  // tokens[0] is the current compaction id; remaining tokens are per-txid entries.
  long flushID = Long.parseLong(tokens[0]);
  flushID++;
  String txidString = String.format("%016x", tid);
  for (int i = 1; i < tokens.length; i++) {
    if (tokens[i].startsWith(txidString))
      continue; // skip self
    log.debug("txidString : {}", txidString);
    log.debug("tokens[{}] : {}", i, tokens[i]);
    // Any entry from a different txid means another configured compaction is
    // already in progress; refuse rather than clobber its config.
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
        TableOperation.COMPACT, TableOperationExceptionType.OTHER,
        "Another compaction with iterators and/or a compaction strategy is running");
  }
  StringBuilder encodedIterators = new StringBuilder();
  if (config != null) {
    // Append this transaction's serialized config as ",<txid>=<hex>" so a
    // re-run of this operation recognizes its own entry (startsWith above).
    Hex hex = new Hex();
    encodedIterators.append(",");
    encodedIterators.append(txidString);
    encodedIterators.append("=");
    encodedIterators.append(new String(hex.encode(config), UTF_8));
  }
  return (Long.toString(flushID) + encodedIterators).getBytes(UTF_8);
}
});
/**
 * Verifies no <em>other</em> namespace already uses {@code namespace}. A lookup that
 * resolves to {@code namespaceId} itself is not a conflict (the operation may be
 * re-running).
 *
 * @throws AcceptableThriftTableOperationException if a different namespace owns the name
 */
public static void checkNamespaceDoesNotExist(ServerContext context, String namespace,
    Namespace.ID namespaceId, TableOperation operation)
    throws AcceptableThriftTableOperationException {
  final Namespace.ID existing = Namespaces.lookupNamespaceId(context, namespace);
  if (existing == null || existing.equals(namespaceId)) {
    return;
  }
  throw new AcceptableThriftTableOperationException(null, namespace, operation,
      TableOperationExceptionType.NAMESPACE_EXISTS, null);
}
/**
 * Reads the table properties from the export metadata file in the export directory,
 * translating I/O failures into a thrift-visible operation exception.
 */
private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
  final Path exportFilePath = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
  try {
    final FileSystem volumeFs = fs.getVolumeByPath(exportFilePath).getFileSystem();
    return TableOperationsImpl.getExportedProps(volumeFs, exportFilePath);
  } catch (IOException ioe) {
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(),
        tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
        "Error reading table props from " + exportFilePath + " " + ioe.getMessage());
  }
}
/**
 * Creates a range-compaction FATE operation.
 *
 * @param startRow inclusive start row; an empty array means "unbounded"
 * @param endRow end row; an empty array means "unbounded"
 * @throws AcceptableThriftTableOperationException with BAD_RANGE if startRow >= endRow
 */
public CompactRange(Namespace.ID namespaceId, Table.ID tableId, byte[] startRow, byte[] endRow,
    List<IteratorSetting> iterators, CompactionStrategyConfig compactionStrategy)
    throws AcceptableThriftTableOperationException {
  requireNonNull(namespaceId, "Invalid argument: null namespaceId");
  requireNonNull(tableId, "Invalid argument: null tableId");
  // startRow/endRow were previously dereferenced (startRow.length) without a
  // check; validate them explicitly for a clear failure message, consistent
  // with the other arguments.
  requireNonNull(startRow, "Invalid argument: null startRow");
  requireNonNull(endRow, "Invalid argument: null endRow");
  requireNonNull(iterators, "Invalid argument: null iterator list");
  requireNonNull(compactionStrategy, "Invalid argument: null compactionStrategy");

  this.tableId = tableId;
  this.namespaceId = namespaceId;
  // An empty byte array means the range is unbounded on that side.
  this.startRow = startRow.length == 0 ? null : startRow;
  this.endRow = endRow.length == 0 ? null : endRow;

  if (!iterators.isEmpty()
      || !compactionStrategy.equals(CompactionStrategyConfigUtil.DEFAULT_STRATEGY)) {
    // Serialize the user-supplied compaction configuration so it can travel
    // with the FATE operation.
    this.config = WritableUtils.toByteArray(
        new UserCompactionConfig(this.startRow, this.endRow, iterators, compactionStrategy));
  } else {
    log.info("No iterators or compaction strategy");
  }

  if (this.startRow != null && this.endRow != null
      && new Text(startRow).compareTo(new Text(endRow)) >= 0)
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
        TableOperation.COMPACT, TableOperationExceptionType.BAD_RANGE,
        "start row must be less than end row");
}
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File referenced by exported table does not exists " + oldFileName); } catch (IOException ioe) { log.warn("{}", ioe.getMessage(), ioe); throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error renaming files " + ioe.getMessage());
try { if (!future.get()) { throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER, "Failed to move files from " + bulkInfo.sourceDir); throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER, ee.getCause().getMessage());
throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER, "Concurrent merge happened"); // TODO need to handle
} catch (IOException ioe) { log.warn("{}", ioe.getMessage(), ioe); throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
// Final step of the export: write the export files, then release every
// reservation this FATE operation holds. Returning null ends the operation.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  try {
    // Write the exported table metadata/files into the export directory.
    exportTable(master.getFileSystem(), master.getContext(), tableInfo.tableName,
        tableInfo.tableID, tableInfo.exportDir);
  } catch (IOException ioe) {
    // Surface I/O failures as a thrift-visible operation error; the
    // reservations below are intentionally NOT released on failure so the
    // operation can be retried or cleaned up by FATE.
    throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(),
        tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
        "Failed to create export files " + ioe.getMessage());
  }
  // Release the namespace/table locks and the export-directory reservation
  // taken earlier in this operation (reserve calls are outside this view).
  Utils.unreserveNamespace(master, tableInfo.namespaceID, tid, false);
  Utils.unreserveTable(master, tableInfo.tableID, tid, false);
  Utils.unreserveHdfsDirectory(master, new Path(tableInfo.exportDir).toString(), tid);
  return null;
}
/**
 * Ensures the table being exported is offline, refreshing the table-state cache
 * once before giving up.
 *
 * @throws AcceptableThriftTableOperationException if the table is still not offline
 *         after a cache refresh
 */
private void checkOffline(ClientContext context) throws Exception {
  // Fast path: the cached state already says offline.
  if (Tables.getTableState(context, tableInfo.tableID) == TableState.OFFLINE) {
    return;
  }
  // The cached state may be stale; clear and re-check before failing.
  Tables.clearCache(context);
  if (Tables.getTableState(context, tableInfo.tableID) == TableState.OFFLINE) {
    return;
  }
  throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(),
      tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
      "Table is not offline");
}
/**
 * Moves the bulk-import source files into the bulk directory using the
 * previously written rename map, then advances to the load-files step.
 */
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  final Path bulkDir = new Path(bulkInfo.bulkDir);
  final Path sourceDir = new Path(bulkInfo.sourceDir);
  log.debug(" tid {} sourceDir {}", tid, sourceDir);

  final VolumeManager fs = master.getFileSystem();

  // For online tables, start an arbitrator for this transaction before moving files.
  if (bulkInfo.tableState == TableState.ONLINE) {
    ZooArbitrator.start(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
  }

  try {
    final Map<String,String> renameMap =
        BulkSerialize.readRenameMap(bulkDir.toString(), p -> fs.open(p));
    moveFiles(String.format("%016x", tid), sourceDir, bulkDir, master, fs, renameMap);
    return new LoadFiles(bulkInfo);
  } catch (Exception ex) {
    // Any failure here is attributed to a bad input directory.
    throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
        TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY,
        bulkInfo.sourceDir + ": " + ex);
  }
}
/**
 * Attempts to take the FATE read/write lock for a table.
 *
 * @return 0 when the lock was acquired, or 100 (a retry delay) when it was not
 * @throws AcceptableThriftTableOperationException if {@code tableMustExist} and the
 *         table's ZooKeeper node is absent
 */
public static long reserveTable(Master env, Table.ID tableId, long tid, boolean writeLock,
    boolean tableMustExist, TableOperation op) throws Exception {
  if (!getLock(env.getContext(), tableId, tid, writeLock).tryLock()) {
    // Lock not available; caller retries after this delay.
    return 100;
  }
  if (tableMustExist) {
    final IZooReaderWriter zk = env.getContext().getZooReaderWriter();
    final String tablePath =
        env.getContext().getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId;
    if (!zk.exists(tablePath))
      throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op,
          TableOperationExceptionType.NOTFOUND, "Table does not exist");
  }
  log.info("table {} ({}) locked for {} operation: {}", tableId, Long.toHexString(tid),
      (writeLock ? "write" : "read"), op);
  return 0;
}
tableId, startRow, endRow); } catch (NoNodeException nne) { throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
/**
 * Attempts to take the FATE read/write lock for a namespace.
 *
 * @return 0 when the lock was acquired, or 100 (a retry delay) when it was not
 * @throws AcceptableThriftTableOperationException if {@code mustExist} and the
 *         namespace's ZooKeeper node is absent
 */
public static long reserveNamespace(Master env, Namespace.ID namespaceId, long id,
    boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
  if (!getLock(env.getContext(), namespaceId, id, writeLock).tryLock()) {
    // Lock not available; caller retries after this delay.
    return 100;
  }
  if (mustExist) {
    final IZooReaderWriter zk = env.getContext().getZooReaderWriter();
    final String namespacePath =
        env.getContext().getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId;
    if (!zk.exists(namespacePath))
      throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op,
          TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
  }
  log.info("namespace {} ({}) locked for {} operation: {}", namespaceId, Long.toHexString(id),
      (writeLock ? "write" : "read"), op);
  return 0;
}
throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " does not exist"); if (!errorStatus.isDirectory()) throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not a directory"); if (fs.listStatus(errorPath).length != 0) throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not empty"); } catch (IOException ex) { log.error("error preparing the bulk import directory", ex); throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY, sourceDir + ": " + ex);
throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, "Unable to write to " + this.errorDir);