@Override public Optional<Long> sizeIfKnown() {
  // Size of this slice: known only when the backing source's size is known.
  Optional<Long> upstreamSize = ByteSource.this.sizeIfKnown();
  if (!upstreamSize.isPresent()) {
    return Optional.absent();
  }
  long total = upstreamSize.get();
  // Clamp the slice offset to the source size, then cap the slice length
  // by however many bytes remain past that offset.
  long clampedOffset = Math.min(offset, total);
  return Optional.of(Math.min(length, total - clampedOffset));
}
private static void changeColumnDefinitionPosition(final ColumnFirstPositionSegment columnFirstPositionSegment, final List<ColumnMetaData> columnMetaDataList) {
  // Move the column named by the FIRST clause to the head of the list; no-op when absent.
  Optional<ColumnMetaData> target = find(columnFirstPositionSegment.getColumnName(), columnMetaDataList);
  if (!target.isPresent()) {
    return;
  }
  ColumnMetaData metaData = target.get();
  columnMetaDataList.remove(metaData);
  columnMetaDataList.add(0, metaData);
}
private static List<Identity> getSshIdentities(final Optional<AgentProxy> agentProxyOpt) { // ssh identities (potentially) used in authentication final ImmutableList.Builder<Identity> listBuilder = ImmutableList.builder(); if (agentProxyOpt.isPresent()) { try { final List<Identity> identities = agentProxyOpt.get().list(); for (final Identity identity : identities) { if (identity.getPublicKey().getAlgorithm().equals("RSA")) { // only RSA keys will work with our TLS implementation listBuilder.add(identity); } } } catch (Exception e) { // We catch everything because right now the masters do not require authentication. // So delay reporting errors to the user until the servers return 401 Unauthorized. log.debug("Unable to get identities from ssh-agent. Note that this might not indicate" + " an actual problem unless your Helios cluster requires authentication" + " for all requests.", e); } } return listBuilder.build(); }
@Test public void shouldSendSensorEventToMultipleRegisteredListeners() {
  // Register two independent listeners against the same default accelerometer sensor.
  TestSensorEventListener firstListener = new TestSensorEventListener();
  TestSensorEventListener secondListener = new TestSensorEventListener();
  Sensor accelerometer = sensorManager.getDefaultSensor(SensorManager.SENSOR_ACCELEROMETER);
  sensorManager.registerListener(firstListener, accelerometer, SensorManager.SENSOR_DELAY_NORMAL);
  sensorManager.registerListener(secondListener, accelerometer, SensorManager.SENSOR_DELAY_NORMAL);

  // Broadcasting a single event must reach every registered listener.
  SensorEvent event = shadow.createSensorEvent();
  shadow.sendSensorEventToListeners(event);

  assertThat(firstListener.getLatestSensorEvent().get()).isEqualTo(event);
  assertThat(secondListener.getLatestSensorEvent().get()).isEqualTo(event);
}
// NOTE(review): this is an incomplete excerpt from the middle of a larger method
// (likely a Flume Kafka channel's transaction commit path). As shown it is not
// syntactically valid: the `catch` below has no visible `try`, `endTime` is never
// declared in view, and several braces opened here are closed outside this excerpt.
// Comments below describe only what the visible tokens do; confirm against the
// full method before editing.

// Lazily create the futures list on first use.
if (!kafkaFutures.isPresent()) { kafkaFutures = Optional.of(new LinkedList<Future<RecordMetadata>>());
long batchSize = producerRecords.get().size();
long startTime = System.nanoTime();
int index = 0;
// Send every buffered record asynchronously, tagging each callback with its
// 1-based position and the shared batch start time.
for (ProducerRecord<String, byte[]> record : producerRecords.get()) { index++; kafkaFutures.get().add(producer.send(record, new ChannelCallback(index, startTime)));
// Block until each send completes, then record timing/success counters and
// clear the per-batch buffers. NOTE(review): `endTime` is assigned outside
// this excerpt — presumably System.nanoTime() after the waits; verify.
for (Future<RecordMetadata> future : kafkaFutures.get()) { future.get();
counter.addToKafkaEventSendTimer((endTime - startTime) / (1000 * 1000));
counter.addToEventPutSuccessCount(batchSize);
producerRecords.get().clear();
kafkaFutures.get().clear();
// NOTE(review): matching `try` for this catch is outside the excerpt.
} catch (Exception ex) { logger.warn("Sending events to Kafka failed", ex);
counter.addToKafkaCommitTimer((endTime - startTime) / (1000 * 1000));
if (logger.isDebugEnabled()) { logger.debug(consumerAndRecords.get().getCommittedOffsetsString());
// Count taken events as successful takes and drop them from the take buffer.
int takes = events.get().size();
if (takes > 0) { counter.addToEventTakeSuccessCount(takes); events.get().clear()
public void testListWhenResponseIs404() throws Exception {
  // A 404 from the tag list endpoint should surface as an empty set, not an error.
  HttpResponse notFound = HttpResponse.builder().statusCode(404).build();
  EC2Api apiWhenDontExist = requestSendsResponse(list, notFound);
  assertEquals(apiWhenDontExist.getTagApi().get().list().toSet(), ImmutableSet.of());
}
// Verifies that the cluster created by testSetupAndSubmitApplication can be rediscovered:
// the launcher's reconnectable cluster id must match the id recorded at setup time.
@Test(dependsOnMethods = "testSetupAndSubmitApplication") public void testGetReconnectableApplicationId() throws Exception { // Assert to check if cluster was created correctly by trying to reconnect to it Assert.assertEquals(this.gobblinAwsClusterLauncher.getReconnectableClusterId().get(), this.clusterId); }
@Test(dependsOnMethods = { "testPut" }) public void testGet() throws IOException {
  // Retrieve the sequence persisted by testPut and verify it round-trips intact.
  Optional<CommitSequence> retrieved = this.store.get(this.jobName, this.datasetUrn);
  Assert.assertTrue(retrieved.isPresent());
  // Compare JSON trees so equality does not depend on CommitSequence.equals().
  Assert.assertEquals(GSON.toJsonTree(retrieved.get()), GSON.toJsonTree(this.sequence));
} }
private static void changeColumnDefinitionPosition(final ColumnAfterPositionSegment columnAfterPositionSegment, final List<ColumnMetaData> columnMetaDataList) {
  // Reposition the named column directly after its anchor column; no-op unless both exist.
  Optional<ColumnMetaData> toMove = find(columnAfterPositionSegment.getColumnName(), columnMetaDataList);
  Optional<ColumnMetaData> anchor = find(columnAfterPositionSegment.getAfterColumnName(), columnMetaDataList);
  if (!toMove.isPresent() || !anchor.isPresent()) {
    return;
  }
  ColumnMetaData moving = toMove.get();
  columnMetaDataList.remove(moving);
  // Look up the anchor's index after the removal so the insert lands right after it.
  int insertAt = columnMetaDataList.indexOf(anchor.get()) + 1;
  columnMetaDataList.add(insertAt, moving);
}
void moveRemainingStreamsFromSource(Host source, List<Host> hosts, Optional<RateLimiter> rateLimiter) { LinkedList<String> streamsToMove = new LinkedList<String>(source.streams); Collections.shuffle(streamsToMove); if (logger.isDebugEnabled()) { logger.debug("Try to move remaining streams from {} : {}", source, streamsToMove); } int hostIdx = hosts.size() - 1; while (!streamsToMove.isEmpty()) { if (rateLimiter.isPresent()) { rateLimiter.get().acquire(); } Host target = hosts.get(hostIdx); if (!target.address.equals(source.address)) { String stream = streamsToMove.remove(); // move the stream if (moveStream(stream, source, target)) { source.streams.remove(stream); target.streams.add(stream); } } --hostIdx; if (hostIdx < 0) { hostIdx = hosts.size() - 1; } } }