// Prepend this node's own ack header, then copy the ackLen upstream headers
// shifted one slot to the right (replies[1..ackLen] mirror ack's flags).
// NOTE(review): fragment — the loop's closing brace and surrounding method are
// outside this view; presumably replies has length ackLen + 1 — confirm at caller.
replies[0] = myHeader; for (int i = 0; i < ackLen; ++i) { replies[i + 1] = ack.getHeaderFlag(i);
// Walk the pipeline ack's per-datanode reply headers from the last (most
// downstream) entry back to the first, stopping early if the client is shutting
// down. Each header packs both a Status and an ECN bit; any target whose header
// reports ECN.CONGESTED is recorded in congestedNodesFromAck.
// NOTE(review): fragment — the reply Status extracted here is presumably used
// further down in code outside this view; closing braces are also not visible.
for (int i = ack.getNumOfReplies()-1; i >=0 && dfsClient.clientRunning; i--) { final Status reply = PipelineAck.getStatusFromHeader(ack .getHeaderFlag(i)); if (PipelineAck.getECNFromHeader(ack.getHeaderFlag(i)) == PipelineAck.ECN.CONGESTED) { congestedNodesFromAck.add(targets[i]);
// Prepend this node's own ack header, then copy the ackLen upstream headers
// shifted one slot to the right (replies[1..ackLen] mirror ack's flags).
// NOTE(review): fragment — the loop's closing brace and surrounding method are
// outside this view; presumably replies has length ackLen + 1 — confirm at caller.
replies[0] = myHeader; for (int i = 0; i < ackLen; ++i) { replies[i + 1] = ack.getHeaderFlag(i);
@Test
public void TestPipeLineAckCompatibility() throws IOException {
  // Build a "legacy" ack proto that carries only the reply-status list, with
  // no combined header flag — the shape produced before ECN support existed.
  DataTransferProtos.PipelineAckProto legacyProto = DataTransferProtos
      .PipelineAckProto.newBuilder()
      .setSeqno(0)
      .addReply(Status.CHECKSUM_OK)
      .build();

  // Build a "new-style" ack on top of it that additionally carries the
  // combined (ECN | status) header flag.
  DataTransferProtos.PipelineAckProto flaggedProto = DataTransferProtos
      .PipelineAckProto.newBuilder().mergeFrom(legacyProto)
      .addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
          Status.CHECKSUM_OK))
      .build();

  // Round-trip the legacy proto: with no flag present, the reader must
  // synthesize a header whose ECN portion is DISABLED.
  ByteArrayOutputStream legacyBytes = new ByteArrayOutputStream();
  legacyProto.writeDelimitedTo(legacyBytes);
  PipelineAck legacyAck = new PipelineAck();
  legacyAck.readFields(new ByteArrayInputStream(legacyBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.CHECKSUM_OK),
      legacyAck.getHeaderFlag(0));

  // Round-trip the flagged proto: the explicit ECN.SUPPORTED flag must
  // survive serialization unchanged.
  ByteArrayOutputStream flaggedBytes = new ByteArrayOutputStream();
  flaggedProto.writeDelimitedTo(flaggedBytes);
  PipelineAck flaggedAck = new PipelineAck();
  flaggedAck.readFields(new ByteArrayInputStream(flaggedBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.CHECKSUM_OK),
      flaggedAck.getHeaderFlag(0));
}
// Prepend this node's own ack header, then copy the ackLen upstream headers
// shifted one slot to the right (replies[1..ackLen] mirror ack's flags).
// NOTE(review): fragment — the loop's closing brace and surrounding method are
// outside this view; presumably replies has length ackLen + 1 — confirm at caller.
replies[0] = myHeader; for (int i = 0; i < ackLen; ++i) { replies[i + 1] = ack.getHeaderFlag(i);