@Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO GORA-298 Implement CassandraStore#getPartitions List<PartitionQuery<K,T>> partitions = new ArrayList<PartitionQuery<K,T>>(); PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<K, T>(query); pqi.setConf(getConf()); partitions.add(pqi); return partitions; }
/**
 * Returns a single partition containing the original query.
 *
 * @param query the query to partition
 * @return a one-element list wrapping {@code query}
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) {
  // Javadoc moved before @Override: a doc comment placed after an
  // annotation is not attached to the declaration by the javadoc tool.
  List<PartitionQuery<K, T>> list = new ArrayList<>();
  PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query);
  pqi.setConf(getConf());
  list.add(pqi);
  return list;
}
/**
 * Builds the partition list for {@code query}; a single partition
 * spanning the entire query range is produced.
 *
 * @param query the query to partition
 * @return a singleton list holding the wrapped query
 * @throws IOException if partitioning fails
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  final PartitionQueryImpl<K, T> wholeRange = new PartitionQueryImpl<>(query);
  wholeRange.setConf(getConf());
  final List<PartitionQuery<K, T>> result = new ArrayList<>();
  result.add(wholeRange);
  return result;
}
/** * {@inheritDoc} */ @Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO : Improve code on OrientDB clusters List<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>( query); partitionQuery.setConf(this.getConf()); partitions.add(partitionQuery); return partitions; }
@Override public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException { // TODO: implement this using Hadoop DB support ArrayList<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query); pqi.setConf(getConf()); partitions.add(pqi); return partitions; }
/**
 * Wraps the whole query in one partition and returns it as a
 * single-element list.
 *
 * @param query the query to partition
 * @return list containing exactly one partition query
 * @throws IOException if partitioning fails
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>(1);
  PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(query);
  partitionQuery.setConf(getConf());
  partitions.add(partitionQuery);
  return partitions;
}
/**
 * Returns a single-partition view of {@code query}.
 *
 * @param query the query to partition
 * @return singleton list with the configured partition query
 * @throws IOException if partitioning fails
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  final PartitionQueryImpl<K, T> pq = new PartitionQueryImpl<>(query);
  pq.setConf(getConf());
  final List<PartitionQuery<K, T>> result = new ArrayList<>();
  result.add(pq);
  return result;
}
/**
 * Returns a single partition containing the original query.
 *
 * @param query the query to partition
 * @return a one-element list wrapping {@code query}
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) {
  // Javadoc moved before @Override: a doc comment placed after an
  // annotation is not attached to the declaration by the javadoc tool.
  List<PartitionQuery<K, T>> list = new ArrayList<>();
  PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query);
  pqi.setConf(getConf());
  list.add(pqi);
  return list;
}
/** * Partitions the given query and returns a list of PartitionQuerys, which * will execute on local data. */ @Override public List<PartitionQuery<K, T>> getPartitions(final Query<K, T> query) throws IOException { // FIXME: for now, there is only one partition as we do not handle // MongoDB sharding configuration List<PartitionQuery<K, T>> partitions = new ArrayList<>(); PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>( query); partitionQuery.setConf(getConf()); partitions.add(partitionQuery); return partitions; }
/**
 * {@inheritDoc}
 * As Aerospike does not currently support query key ranges, only a single
 * partition covering the whole query is returned by this method.
 *
 * @param query the query to execute
 * @return the list of partitions, currently containing exactly one partition
 * @throws IOException if partitioning fails
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(
      query);
  partitionQuery.setConf(getConf());
  partitions.add(partitionQuery);
  return partitions;
}
/**
 * {@inheritDoc}
 * As Aerospike does not currently support query key ranges, only a single
 * partition covering the whole query is returned by this method.
 *
 * @param query the query to execute
 * @return the list of partitions, currently containing exactly one partition
 * @throws IOException if partitioning fails
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(
      query);
  partitionQuery.setConf(getConf());
  partitions.add(partitionQuery);
  return partitions;
}
/**
 * Splits the query into {@code NUM_PARTITIONS} partitions, one per mock
 * location.
 *
 * @param query the query to split
 * @return one partition per configured location
 * @throws IOException declared by the interface; not thrown here
 */
@Override
public List<PartitionQuery<String, MockPersistent>> getPartitions(
    Query<String, MockPersistent> query) throws IOException {
  // Declare against the List interface, presized to the known count.
  List<PartitionQuery<String, MockPersistent>> list = new ArrayList<>(NUM_PARTITIONS);
  for (int i = 0; i < NUM_PARTITIONS; i++) {
    list.add(new PartitionQueryImpl<>(query, LOCATIONS[i]));
  }
  return list;
}
// Build a partition spanning this member's key range [first, last], pinned to
// the member's host for locality. NOTE(review): assumes memberOwnedCacheEntries
// is non-empty — first()/last() throw NoSuchElementException otherwise; confirm
// the caller guards against members owning no matching keys.
PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(
    query, memberOwnedCacheEntries.first(), memberOwnedCacheEntries.last(),
    member.getSocketAddress().getHostString());
/**
 * Splits the query into one partition per Hazelcast cluster member, each
 * partition bounded by the smallest and largest matching keys owned by that
 * member and pinned to the member's host for locality.
 *
 * @param query the query to partition
 * @return the per-member partitions, or {@code null} if partitioning fails
 *         (existing error contract preserved)
 * @throws IOException declared by the interface; failures are logged and
 *         surfaced as a {@code null} return instead
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  try {
    Member[] clusterMembers = new Member[hazelcastInstance.getCluster().getMembers().size()];
    this.hazelcastInstance.getCluster().getMembers().toArray(clusterMembers);
    for (Member member : clusterMembers) {
      // NOTE: the query is re-executed for every member; the result set is
      // scanned to collect the keys this member owns.
      JCacheResult<K, T> result = ((JCacheResult<K, T>) query.execute());
      ConcurrentSkipListSet<K> memberOwnedCacheEntries = new ConcurrentSkipListSet<>();
      while (result.next()) {
        K key = result.getKey();
        // Renamed from `partition` to avoid confusion with the
        // PartitionQueryImpl variable declared below.
        Partition hzPartition = hazelcastInstance.getPartitionService().getPartition(key);
        if (hzPartition.getOwner().getUuid().equals(member.getUuid())) {
          memberOwnedCacheEntries.add(key);
        }
      }
      // Fix: skip members that own no matching keys. Previously first()/last()
      // on the empty set threw NoSuchElementException, which the catch-all
      // below turned into a null return for the whole operation.
      if (memberOwnedCacheEntries.isEmpty()) {
        continue;
      }
      PartitionQueryImpl<K, T> partitionQuery = new PartitionQueryImpl<>(
          query, memberOwnedCacheEntries.first(), memberOwnedCacheEntries.last(),
          member.getSocketAddress().getHostString());
      partitionQuery.setConf(this.getConf());
      partitions.add(partitionQuery);
    }
  } catch (Exception ex) {
    LOG.error("Exception occurred while partitioning the query based on Hazelcast partitions.", ex);
    return null;
  }
  LOG.info("Query is partitioned to {} number of partitions.", partitions.size());
  return partitions;
}
/**
 * Round-trips a {@code PartitionQueryImpl} through Writable serialization
 * to verify read/write symmetry.
 */
@Test
public void testReadWrite() throws Exception {
  MockQuery inner = dataStore.newQuery();
  inner.setStartKey("start");
  inner.setLimit(42);
  PartitionQueryImpl<String, MockPersistent> partitionQuery =
      new PartitionQueryImpl<>(inner);
  TestWritable.testWritable(partitionQuery);
}
// Wrap the key range [startKey, endKey] as a partition pinned to `location`,
// propagate the store's Hadoop configuration, and collect it into the result.
PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query, startKey, endKey, location);
pqi.setConf(getConf());
ret.add(pqi);
// Tail of the endKey ternary (condition is outside this view): presumably a
// null/empty split-stop means an open-ended region — TODO confirm; otherwise
// the region's stop row is decoded into a key of type keyClass.
null : HBaseByteInterface.fromBytes(keyClass, splitStop);
// One partition per HBase region, bounded by the region's start/stop keys and
// pinned to the region's host for data locality.
PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(
    query, startKey, endKey, regionLocation);
partition.setConf(getConf());
// Tail of the endKey ternary (condition is outside this view): presumably a
// null/empty split-stop means an open-ended region — TODO confirm; otherwise
// the region's stop row is decoded into a key of type keyClass.
null : HBaseByteInterface.fromBytes(keyClass, splitStop);
// One partition per HBase region, bounded by the region's start/stop keys.
// Use the diamond operator instead of repeating the explicit type arguments.
PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(
    query, startKey, endKey, regionLocation);
partition.setConf(getConf());