@Override
public Iterable<Map.Entry<String, String>> fetchAll()
{
  return inReadOnlyTransaction((handle, status) -> {
    return handle.createQuery(fetchAllQuery)
                 .setFetchSize(streamingFetchSize)
                 .map(new KeyValueResultSetMapper(keyColumn, valueColumn))
                 .list();
  });
}
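The KeyValueResultSetMapper used above is not shown in this snippet; a minimal sketch of what it might look like, assuming both columns hold plain strings and the jDBI 2 ResultSetMapper API:

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.AbstractMap;
import java.util.Map;

import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

// Hypothetical mapper: turns each row into a key/value entry read from the two configured columns.
class KeyValueResultSetMapper implements ResultSetMapper<Map.Entry<String, String>>
{
  private final String keyColumn;
  private final String valueColumn;

  KeyValueResultSetMapper(String keyColumn, String valueColumn)
  {
    this.keyColumn = keyColumn;
    this.valueColumn = valueColumn;
  }

  @Override
  public Map.Entry<String, String> map(int index, ResultSet r, StatementContext ctx) throws SQLException
  {
    return new AbstractMap.SimpleImmutableEntry<>(r.getString(keyColumn), r.getString(valueColumn));
  }
}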
/**
 * @param connector SQL connector to the metadata store
 * @param metadataStorageTablesConfig Tables configuration
 * @param dataSource Name of data source
 *
 * @return List of all data segments that are part of the given data source
 */
static List<DataSegment> getDataSegmentList(
    final SQLMetadataConnector connector,
    final MetadataStorageTablesConfig metadataStorageTablesConfig,
    final String dataSource
)
{
  return connector.retryTransaction(
      (handle, status) -> handle
          .createQuery(String.format(
              "SELECT payload FROM %s WHERE dataSource = :dataSource",
              metadataStorageTablesConfig.getSegmentsTable()
          ))
          .setFetchSize(getStreamingFetchSize(connector))
          .bind("dataSource", dataSource)
          .map(ByteArrayMapper.FIRST)
          .fold(
              new ArrayList<>(),
              (Folder3<List<DataSegment>, byte[]>) (accumulator, payload, control, ctx) -> {
                try {
                  final DataSegment segment =
                      DATA_SEGMENT_INTERNER.intern(JSON_MAPPER.readValue(payload, DataSegment.class));
                  accumulator.add(segment);
                  return accumulator;
                }
                catch (Exception e) {
                  throw new SQLException(e.toString());
                }
              }
          ),
      3,
      DEFAULT_MAX_TRIES
  );
}
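A hypothetical call site, assuming a configured connector and tables config are already in hand (the data source name is illustrative):

// Hypothetical usage: load every segment registered for one data source.
final List<DataSegment> segments = getDataSegmentList(connector, metadataStorageTablesConfig, "wikipedia");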
.setFetchSize(connector.getStreamingFetchSize())
.bind("dataSource", dataSource)
.bind("start", interval.getStart().toString())
.setFetchSize(connector.getStreamingFetchSize())
.setMaxRows(limit)
.bind("dataSource", dataSource)
.setFetchSize(connector.getStreamingFetchSize())
.bind("dataSource", dataSource)
.map(ByteArrayMapper.FIRST)
public void apply(SQLStatement q) throws SQLException
{
    assert q instanceof Query;
    ((Query) q).setFetchSize(va);
}
};
@Override
public void apply(SQLStatement q) throws SQLException
{
    assert q instanceof Query;
    ((Query) q).setFetchSize(va);
}
};
public void apply(SQLStatement q) throws SQLException
{
    assert q instanceof Query;
    ((Query) q).setFetchSize(fs.value());
}
};
@Override
public void apply(SQLStatement q) throws SQLException
{
    assert q instanceof Query;
    ((Query) q).setFetchSize(fs.value());
}
};
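These apply bodies read like the statement customizers behind jDBI's @FetchSize SQL object annotation (fs being the annotation instance, va a value taken from a method argument). Assuming that annotation, a hypothetical DAO using it might look like:

import java.util.List;

import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.customizers.FetchSize;

// Hypothetical DAO: the annotation's customizer ends up calling setFetchSize(...)
// on the underlying Query, just as in the apply(...) bodies above.
public interface SomethingDao
{
    @FetchSize(100)
    @SqlQuery("select name from something order by id")
    List<String> listNames();
}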
    getSegmentsTable()
))
.setFetchSize(connector.getStreamingFetchSize())
.bind("dataSource", ds)
.map(ByteArrayMapper.FIRST)
@Override
public Void withHandle(final Handle handle) throws Exception
{
    // MySQL needs special setup to make it stream the results. See:
    // http://javaquirks.blogspot.com/2007/12/mysql-streaming-result-set.html
    // http://stackoverflow.com/questions/2447324/streaming-large-result-sets-with-mysql
    final Query<Map<String, Object>> query = handle.createQuery("getStreamingAggregationCandidates")
                                                   .setFetchSize(Integer.MIN_VALUE)
                                                   .bind("aggregationLevel", aggregationLevel)
                                                   .bind("tenantRecordId", createCallContext().getTenantRecordId());
    query.setStatementLocator(new StringTemplate3StatementLocator(TimelineAggregatorSqlDao.class));

    ResultIterator<TimelineChunk> iterator = null;
    try {
        iterator = query
                .map(timelineChunkMapper)
                .iterator();
        while (iterator.hasNext()) {
            aggregationConsumer.processTimelineChunk(iterator.next());
        }
    }
    catch (Exception e) {
        log.error(String.format("Exception during aggregation of level %d", aggregationLevel), e);
    }
    finally {
        if (iterator != null) {
            iterator.close();
        }
    }
    return null;
}
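The Integer.MIN_VALUE fetch size above is the MySQL Connector/J convention for streaming rows one at a time instead of buffering the whole result set client-side. A stripped-down sketch of the same pattern, with a hypothetical table name and a plain string mapper (jDBI 2 API):

import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.ResultIterator;
import org.skife.jdbi.v2.util.StringMapper;

public class StreamingExample
{
    // Hypothetical example: iterate a large MySQL result set row by row.
    static void streamNames(Handle handle)
    {
        ResultIterator<String> rows = handle.createQuery("select name from big_table")
                                            .setFetchSize(Integer.MIN_VALUE)
                                            .map(StringMapper.FIRST)
                                            .iterator();
        try {
            while (rows.hasNext()) {
                process(rows.next()); // handle each row as it arrives
            }
        }
        finally {
            rows.close(); // releases the underlying statement and result set
        }
    }

    private static void process(String name) { /* consume one row */ }
}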
.setFetchSize(connector.getStreamingFetchSize())
.bind("dataSource", ds)
.map(ByteArrayMapper.FIRST)
@Test
public void testFetchSize() throws Exception
{
    h.createScript("default-data").execute();
    Query<Something> q = h.createQuery("select id, name from something order by id").map(Something.class);
    q.setFetchSize(1);

    ResultIterator<Something> r = q.iterator();

    assertTrue(r.hasNext());
    r.next();
    assertTrue(r.hasNext());
    r.next();
    assertFalse(r.hasNext());
}