/**
 * Produces a deep copy of this Order using the copy constructor.
 *
 * @return a new, independent Order equal to this one
 */
public Order deepCopy() {
  Order duplicate = new Order(this);
  return duplicate;
}
/**
 * Appends one sort-column specification, lazily allocating the backing list
 * on first use.
 *
 * @param col   name of the column to sort by
 * @param order sort direction constant (1 = ascending, 0 = descending)
 * @return the parent builder ({@code child}) to allow call chaining
 */
public T addSortCol(String col, int order) {
  if (sortCols == null) {
    sortCols = new ArrayList<>();
  }
  Order sortSpec = new Order(col, order);
  sortCols.add(sortSpec);
  return child;
}
/**
 * Converts a list of persisted {@code MOrder} entries into thrift
 * {@code Order} objects.
 *
 * @param mkeys persisted sort keys; may be null
 * @return the converted list, or null when {@code mkeys} is null
 *         (mirrors the stored value rather than substituting an empty list)
 */
private List<Order> convertToOrders(List<MOrder> mkeys) {
  if (mkeys == null) {
    return null;
  }
  List<Order> orders = new ArrayList<>(mkeys.size());
  for (MOrder mkey : mkeys) {
    orders.add(new Order(mkey.getCol(), mkey.getOrder()));
  }
  return orders;
}
// Row-mapping callback: converts one SQL result row into a sort-column entry
// on the StorageDescriptor. Rows whose sort-order value (fields[2]) is NULL
// are skipped entirely; fields[1] is the column name.
// NOTE(review): this line is a truncated fragment — the enclosing anonymous
// class and the method registering it are not visible here.
@Override public void apply(StorageDescriptor t, Object[] fields) { if (fields[2] == null) return; t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]))); }}); }
// Row-mapping callback variant that populates the Order field-by-field,
// gated on finalCounter (presumably the number of selected columns):
// counter > 0 fills the column name, counter > 1 fills the sort order, and a
// NULL order value with counter > 1 drops the row. TODO(review): confirm the
// meaning of finalCounter against the enclosing method, which is not visible
// in this truncated fragment.
@Override public void apply(StorageDescriptor t, Object[] fields) { if (finalCounter > 1 && fields[2] == null) { return; } Order order = new Order(); if (finalCounter > 0) { order.setCol((String) fields[1]); } if (finalCounter > 1) { order.setOrder(extractSqlInt(fields[2])); } t.addToSortCols(order); }}); }
/**
 * Builds a thrift {@link StorageDescriptor} for a table from its column list
 * and {@code Storage} metadata.
 *
 * <p>Skewed storage is rejected up front because writing to skewed
 * tables/partitions is not supported. Bucketing information (bucket count,
 * bucketed-by columns, and optional sort columns) is copied only when the
 * storage declares a bucket property.
 *
 * @param tableName name recorded on the SerDe info
 * @param columns   table columns, converted to metastore field schemas
 * @param storage   source of location, format, serde and bucketing metadata
 * @return a fully populated storage descriptor
 * @throws IllegalArgumentException if the storage is skewed
 */
private static StorageDescriptor makeStorageDescriptor(String tableName, List<Column> columns, Storage storage)
{
    if (storage.isSkewed()) {
        throw new IllegalArgumentException("Writing to skewed table/partition is not supported");
    }

    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tableName);
    serdeInfo.setSerializationLib(storage.getStorageFormat().getSerDeNullable());
    serdeInfo.setParameters(storage.getSerdeParameters());

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setLocation(emptyToNull(storage.getLocation()));
    descriptor.setCols(columns.stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    descriptor.setSerdeInfo(serdeInfo);
    descriptor.setInputFormat(storage.getStorageFormat().getInputFormatNullable());
    descriptor.setOutputFormat(storage.getStorageFormat().getOutputFormatNullable());
    descriptor.setParameters(ImmutableMap.of());

    storage.getBucketProperty().ifPresent(bucketProperty -> {
        descriptor.setNumBuckets(bucketProperty.getBucketCount());
        descriptor.setBucketCols(bucketProperty.getBucketedBy());
        if (!bucketProperty.getSortedBy().isEmpty()) {
            descriptor.setSortCols(bucketProperty.getSortedBy().stream()
                    .map(sortingColumn -> new Order(sortingColumn.getColumnName(), sortingColumn.getOrder().getHiveOrder()))
                    .collect(toList()));
        }
    });
    return descriptor;
}
// Parses the sort-column list of a CREATE/ALTER TABLE AST into Order entries:
// TOK_TABSORTCOLNAMEASC children become ascending orders; any other child is
// treated as descending and is accepted only when its nulls-ordering token is
// TOK_NULLS_LAST, otherwise a SemanticException is thrown.
// NOTE(review): the ASC branch descends one level and reads the column name
// without inspecting the nulls-ordering token, while the DESC branch validates
// TOK_NULLS_LAST — presumably this predates the symmetric TOK_NULLS_FIRST
// validation present in newer versions of this method; confirm against the
// grammar before changing.
protected List<Order> getColumnNamesOrder(ASTNode ast) throws SemanticException { List<Order> colList = new ArrayList<Order>(); int numCh = ast.getChildCount(); for (int i = 0; i < numCh; i++) { ASTNode child = (ASTNode) ast.getChild(i); if (child.getToken().getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { child = (ASTNode) child.getChild(0); colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), HIVE_COLUMN_ORDER_ASC)); } else { child = (ASTNode) child.getChild(0); if (child.getToken().getType() == HiveParser.TOK_NULLS_LAST) { colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), HIVE_COLUMN_ORDER_DESC)); } else { throw new SemanticException("create/alter table: " + "not supported NULLS FIRST for ORDER BY in DESC order"); } } } return colList; }
// Copy-constructor interior: deep-copies the other instance's sortCols list,
// element by element, via Order's copy constructor (Thrift-generated style).
// NOTE(review): truncated fragment — the enclosing constructor and the closing
// braces/assignment are not visible here.
List<Order> __this__sortCols = new ArrayList<Order>(other.sortCols.size()); for (Order other_element : other.sortCols) { __this__sortCols.add(new Order(other_element));
// Translates internal SortCol descriptors (column index + '+'/'-' direction
// char) into metastore Order entries, resolving each index against the
// partition's column list; out-of-range indexes are skipped. A '+' maps to
// HIVE_COLUMN_ORDER_ASC.
// NOTE(review): truncated fragment — the ':' alternative of the ternary and
// the loop's closing braces are not visible here.
for (SortCol sortCol : sortCols) { if (sortCol.getIndexes().get(0) < partn.getCols().size()) { newSortCols.add(new Order( partn.getCols().get(sortCol.getIndexes().get(0)).getName(), sortCol.getSortOrder() == '+' ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC :
/**
 * Translates the sort-column AST of a CREATE/ALTER TABLE statement into a
 * list of {@code Order} entries. Only the default null ordering is accepted:
 * ASC columns must carry NULLS FIRST and DESC columns must carry NULLS LAST;
 * any other combination is rejected.
 *
 * @param ast node whose children are TOK_TABSORTCOLNAMEASC/DESC sort specs
 * @return one Order per sort column, with unescaped, lower-cased names
 * @throws SemanticException on an unsupported null-ordering combination
 */
protected List<Order> getColumnNamesOrder(ASTNode ast) throws SemanticException {
  List<Order> result = new ArrayList<Order>();
  int childCount = ast.getChildCount();
  for (int idx = 0; idx < childCount; idx++) {
    ASTNode sortSpec = (ASTNode) ast.getChild(idx);
    boolean ascending = sortSpec.getToken().getType() == HiveParser.TOK_TABSORTCOLNAMEASC;
    // Descend to the nulls-ordering node; its first child holds the column name.
    ASTNode nullsNode = (ASTNode) sortSpec.getChild(0);
    if (ascending) {
      if (nullsNode.getToken().getType() != HiveParser.TOK_NULLS_FIRST) {
        throw new SemanticException("create/alter table: "
            + "not supported NULLS LAST for ORDER BY in ASC order");
      }
      result.add(new Order(unescapeIdentifier(nullsNode.getChild(0).getText()).toLowerCase(),
          HIVE_COLUMN_ORDER_ASC));
    } else {
      if (nullsNode.getToken().getType() != HiveParser.TOK_NULLS_LAST) {
        throw new SemanticException("create/alter table: "
            + "not supported NULLS FIRST for ORDER BY in DESC order");
      }
      result.add(new Order(unescapeIdentifier(nullsNode.getChild(0).getText()).toLowerCase(),
          HIVE_COLUMN_ORDER_DESC));
    }
  }
  return result;
}
// Same translation as the sibling fragment above: internal SortCol (index +
// direction char) resolved against partition columns into an Order, with '+'
// meaning ascending; indexes beyond the partition's column count are skipped.
// NOTE(review): truncated fragment — the ':' alternative of the ternary and
// the loop's closing braces are not visible here.
for (SortCol sortCol : sortCols) { if (sortCol.getIndexes().get(0) < partn.getCols().size()) { newSortCols.add(new Order( partn.getCols().get(sortCol.getIndexes().get(0)).getName(), sortCol.getSortOrder() == '+' ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC :
// Thrift-generated deserialization loop: reads _list178.size Order structs
// from the input protocol and appends each to struct.sortCols.
// NOTE(review): as shown, the for statement syntactically governs only the
// first assignment; Thrift-generated code normally braces this loop body —
// presumably the braces were lost when this line was extracted. Confirm
// against the generated source before treating this as a bug.
for (int _i180 = 0; _i180 < _list178.size; ++_i180) _elem179 = new Order(); _elem179.read(iprot); struct.sortCols.add(_elem179);
// Thrift-generated deserialization loop (second variant, different generated
// counter names): reads _list199.size Order structs and appends each to
// struct.sortCols.
// NOTE(review): as shown, the for statement syntactically governs only the
// first assignment; Thrift-generated code normally braces this loop body —
// presumably the braces were lost when this line was extracted. Confirm
// against the generated source before treating this as a bug.
for (int _i201 = 0; _i201 < _list199.size; ++_i201) _elem200 = new Order(); _elem200.read(iprot); struct.sortCols.add(_elem200);
@Test public void sortedTable() throws Exception { List<Order> sortCols = new ArrayList<Order>(1); sortCols.add(new Order("b", 1)); Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false); addBaseFile(t, null, 20L, 20); addDeltaFile(t, null, 21L, 22L, 2); addDeltaFile(t, null, 23L, 24L, 2); addDeltaFile(t, null, 21L, 24L, 4); burnThroughTransactions("default", "st", 25); CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR); txnHandler.compact(rqst); startWorker(); // There should still be four directories in the location. FileSystem fs = FileSystem.get(conf); FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation())); Assert.assertEquals(4, stat.length); }
// For each table column that participates in the index, records the column in
// the index table's schema and registers it as an ascending (order = 1) sort
// column, bumping the counter k.
// NOTE(review): truncated fragment — the enclosing loop/method and the use of
// k are not visible here.
if (indexedCols.contains(col.getName())) { indexTblCols.add(col); sortCols.add(new Order(col.getName(), 1)); k++;
@Test public void sortedPartition() throws Exception { List<Order> sortCols = new ArrayList<Order>(1); sortCols.add(new Order("b", 1)); Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols, false); Partition p = newPartition(t, "today", sortCols); addBaseFile(t, p, 20L, 20); addDeltaFile(t, p, 21L, 22L, 2); addDeltaFile(t, p, 23L, 24L, 2); addDeltaFile(t, p, 21L, 24L, 4); burnThroughTransactions("default", "sp", 25); CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR); rqst.setPartitionname("ds=today"); txnHandler.compact(rqst); startWorker(); // There should still be four directories in the location. FileSystem fs = FileSystem.get(conf); FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation())); Assert.assertEquals(4, stat.length); }
// Row-mapping callback: converts one SQL result row into a sort-column entry
// on the StorageDescriptor, skipping rows whose sort-order value (fields[2])
// is NULL; fields[1] is the column name.
// NOTE(review): truncated fragment — the enclosing anonymous class and the
// method registering it are not visible here.
@Override public void apply(StorageDescriptor t, Object[] fields) { if (fields[2] == null) return; t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]))); }});
// Duplicate of the preceding row-mapping callback (appears in a second file):
// maps a SQL result row to a sort-column entry, skipping NULL sort orders.
// NOTE(review): truncated fragment — the enclosing anonymous class and the
// method registering it are not visible here.
@Override public void apply(StorageDescriptor t, Object[] fields) { if (fields[2] == null) return; t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]))); }});
/**
 * Maps persisted {@code MOrder} sort keys onto thrift {@code Order} objects.
 *
 * @param mkeys persisted sort keys; may be null
 * @return converted orders, or null when {@code mkeys} is null
 */
private List<Order> convertToOrders(List<MOrder> mkeys) {
  if (mkeys == null) {
    return null;
  }
  List<Order> orders = new ArrayList<Order>(mkeys.size());
  for (MOrder mOrder : mkeys) {
    orders.add(new Order(mOrder.getCol(), mOrder.getOrder()));
  }
  return orders;
}
/**
 * Converts each persisted {@code MOrder} entry into its thrift {@code Order}
 * equivalent, preserving column name and sort direction.
 *
 * @param mkeys persisted sort keys; may be null
 * @return converted orders in the same sequence, or null for null input
 */
private List<Order> convertToOrders(List<MOrder> mkeys) {
  if (mkeys == null) {
    return null;
  }
  List<Order> converted = new ArrayList<Order>(mkeys.size());
  for (MOrder sourceKey : mkeys) {
    converted.add(new Order(sourceKey.getCol(), sourceKey.getOrder()));
  }
  return converted;
}