org.apache.hadoop.yarn.proto

How to use org.apache.hadoop.yarn.proto

Best Java code snippets using org.apache.hadoop.yarn.proto (Showing top 20 results out of 315)
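
The classes in this package are Protocol Buffers (proto2) messages generated from YARN's .proto definitions, so they are normally built through their nested Builder types and read back with parseFrom() rather than instantiated directly. As a minimal sketch of that pattern (field names follow yarn_protos.proto as shipped with Hadoop 2.x; the timestamp and id values are arbitrary examples):

import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;

public class YarnProtoUsageSketch {
 public static void main(String[] args) throws Exception {
  // ApplicationIdProto: cluster timestamp plus a sequential id (example values).
  ApplicationIdProto appId = ApplicationIdProto.newBuilder()
    .setClusterTimestamp(1558000000000L)
    .setId(1)
    .build();

  // Nested messages are attached via the corresponding builder setters.
  ApplicationAttemptIdProto attemptId = ApplicationAttemptIdProto.newBuilder()
    .setApplicationId(appId)
    .setAttemptId(1)
    .build();

  ContainerIdProto containerId = ContainerIdProto.newBuilder()
    .setAppId(appId)
    .setAppAttemptId(attemptId)
    .setId(1)
    .build();

  // Like any protobuf message, the result round-trips through bytes.
  byte[] bytes = containerId.toByteArray();
  ContainerIdProto parsed = ContainerIdProto.parseFrom(bytes);
  System.out.println(parsed);
 }
}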

origin: apache/drill

private void yarnReport() {
 RegisterApplicationMasterResponse response = yarn.getRegistrationResponse();
 LOG.info("YARN queue: " + response.getQueue());
 Resource resource = response.getMaximumResourceCapability();
 LOG.info("YARN max resource: " + resource.getMemory() + " MB, "
   + resource.getVirtualCores() + " cores");
 EnumSet<SchedulerResourceTypes> types = response
   .getSchedulerResourceTypes();
 StringBuilder buf = new StringBuilder();
 String sep = "";
 for (SchedulerResourceTypes type : types) {
  buf.append(sep);
  buf.append(type.toString());
  sep = ", ";
 }
 LOG.info("YARN scheduler resource types: " + buf.toString());
}
origin: apache/drill

@Override
public void register(String trackingUrl) throws YarnFacadeException {
 String thisHostName = NetUtils.getHostname();
 LOG.debug("Host Name from YARN: " + thisHostName);
 if (trackingUrl != null) {
  // YARN seems to provide multiple names: MACHNAME.local/10.250.56.235
  // The second seems to be the IP address, which is what we want.
  String names[] = thisHostName.split("/");
  amHost = names[names.length - 1];
  appMasterTrackingUrl = trackingUrl.replace("<host>", amHost);
  LOG.info("Tracking URL: " + appMasterTrackingUrl);
 }
 try {
  LOG.trace("Registering with YARN");
  registration = resourceMgr.registerApplicationMaster(thisHostName, 0,
    appMasterTrackingUrl);
 } catch (YarnException | IOException e) {
  throw new YarnFacadeException("Register AM failed", e);
 }
 // Some distributions (but not the stock YARN) support Disk
 // resources. Since Drill compiles against Apache YARN, without disk
 // resources, we have to use an indirect mechanism to look for the
 // disk enum at runtime when we don't have that enum value at compile time.
 for (SchedulerResourceTypes type : registration
   .getSchedulerResourceTypes()) {
  if (type.name().equals("DISK")) {
   supportsDisks = true;
  }
 }
}
origin: apache/phoenix

// The head of this snippet is truncated in the index; zkElectionPath is a
// placeholder name for the ZooKeeper leader-election path prefix built earlier.
String path = zkElectionPath + "/" + ACTIVE_STANDBY_ELECTOR_LOCK;
byte[] data = zk.getData(path, zkw, new Stat());
ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
LOG.info("Active RmId : " + proto.getRmId());
// Resolve the active ResourceManager host from yarn.resourcemanager.hostname.<rm-id>
String activeRMHost =
    config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
LOG.info("activeResourceManagerHostname = " + activeRMHost);
origin: org.apache.hadoop/hadoop-yarn-api

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 startTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
origin: com.github.jiayuhan-it/hadoop-yarn-server-common

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 containerState_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto.C_NEW;
 resource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 diagnostics_ = "N/A";
 containerExitStatus_ = 0;
 creationTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
origin: com.github.jiayuhan-it/hadoop-yarn-server-nodemanager

private void initFields() {
 resource_ = org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto.getDefaultInstance();
 status_ = org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto.FETCH_PENDING;
 localPath_ = org.apache.hadoop.yarn.proto.YarnProtos.URLProto.getDefaultInstance();
 localSize_ = 0L;
 exception_ = org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: org.apache.hadoop/hadoop-yarn-api

public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto other) {
 if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto.getDefaultInstance()) return this;
 this.mergeUnknownFields(other.getUnknownFields());
 return this;
}
origin: org.apache.hadoop/hadoop-yarn-api

public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto other) {
 if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto.getDefaultInstance()) return this;
 this.mergeUnknownFields(other.getUnknownFields());
 return this;
}
origin: org.apache.hadoop/hadoop-yarn-api

/**
 * <code>optional string application_name = 2;</code>
 */
public Builder clearApplicationName() {
 bitField0_ = (bitField0_ & ~0x00000002);
 applicationName_ = getDefaultInstance().getApplicationName();
 onChanged();
 return this;
}
origin: org.apache.hadoop/hadoop-yarn-api

/**
 * <code>optional string application_type = 3;</code>
 */
public Builder clearApplicationType() {
 bitField0_ = (bitField0_ & ~0x00000004);
 applicationType_ = getDefaultInstance().getApplicationType();
 onChanged();
 return this;
}
origin: org.apache.hadoop/hadoop-yarn-api

public org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto buildPartial() {
 org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto result = new org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto(this);
 onBuilt();
 return result;
}
origin: org.apache.hadoop/hadoop-yarn-api

public org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto build() {
 org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto result = buildPartial();
 if (!result.isInitialized()) {
  throw newUninitializedMessageException(result);
 }
 return result;
}
origin: org.apache.hadoop/hadoop-yarn-api

public Builder mergeFrom(org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto other) {
 if (other == org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto.getDefaultInstance()) return this;
 this.mergeUnknownFields(other.getUnknownFields());
 return this;
}
origin: org.apache.hadoop/hadoop-yarn-api

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 startTime_ = 0L;
 finishTime_ = 0L;
 diagnosticsInfo_ = "";
 containerExitStatus_ = 0;
 containerState_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto.C_NEW;
}
private byte memoizedIsInitialized = -1;
origin: io.hops/hadoop-yarn-server-nodemanager

private void initFields() {
 resource_ = org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto.getDefaultInstance();
 status_ = org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto.FETCH_PENDING;
 localPath_ = org.apache.hadoop.yarn.proto.YarnProtos.URLProto.getDefaultInstance();
 localSize_ = 0L;
 exception_ = org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: io.hops/hadoop-yarn-api

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 startTime_ = 0L;
 finishTime_ = 0L;
 diagnosticsInfo_ = "";
 containerExitStatus_ = 0;
 containerState_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto.C_NEW;
}
private byte memoizedIsInitialized = -1;
origin: org.apache.hadoop/hadoop-yarn-server-nodemanager

private void initFields() {
 resource_ = org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto.getDefaultInstance();
 status_ = org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.ResourceStatusTypeProto.FETCH_PENDING;
 localPath_ = org.apache.hadoop.yarn.proto.YarnProtos.URLProto.getDefaultInstance();
 localSize_ = 0L;
 exception_ = org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: com.github.jiayuhan-it/hadoop-yarn-api

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 startTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
origin: io.hops/hadoop-yarn-api

private void initFields() {
 containerId_ = org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto.getDefaultInstance();
 allocatedResource_ = org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto.getDefaultInstance();
 assignedNodeId_ = org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto.getDefaultInstance();
 priority_ = org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto.getDefaultInstance();
 startTime_ = 0L;
}
private byte memoizedIsInitialized = -1;
Most used classes in org.apache.hadoop.yarn.proto

  • YarnProtos$ApplicationAttemptIdProto
  • YarnProtos$ApplicationAttemptIdProto$Builder
  • YarnProtos$ApplicationIdProto
  • YarnProtos$ContainerIdProto
  • YarnProtos$ResourceProto
  • YarnProtos$PriorityProto
  • YarnProtos$ApplicationIdProto$Builder
  • YarnProtos$ContainerIdProto$Builder
  • YarnProtos$NodeIdProto
  • YarnProtos$PriorityProto$Builder
  • YarnProtos$ResourceProto$Builder
  • YarnProtos$NodeIdProto$Builder
  • YarnProtos$LogAggregationContextProto
  • YarnSecurityTokenProtos$YARNDelegationTokenIdentifierProto
  • YarnProtos$ContainerStateProto
  • YarnProtos$FinalApplicationStatusProto
  • YarnProtos$LogAggregationContextProto$Builder
  • YarnProtos$SerializedExceptionProto$Builder
  • YarnProtos$SerializedExceptionProto
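
Most of these are the generated message classes and their Builder companions; the default-instance initialization seen in the initFields() snippets above comes straight from the protobuf runtime. A rough illustration using ResourceProto and PriorityProto (a sketch assuming the Hadoop 2.x field layout, i.e. memory/virtual_cores and a single priority field; the 4096 MB / 2 vcore values are examples):

import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;

public class YarnProtoBuilderSketch {
 public static void main(String[] args) {
  // getDefaultInstance() is the all-fields-unset value used by the
  // generated initFields() methods shown above.
  ResourceProto defaults = ResourceProto.getDefaultInstance();
  System.out.println("default memory set? " + defaults.hasMemory());

  // Build a resource of 4096 MB and 2 vcores (example values).
  ResourceProto resource = ResourceProto.newBuilder()
    .setMemory(4096)
    .setVirtualCores(2)
    .build();

  PriorityProto priority = PriorityProto.newBuilder()
    .setPriority(1)
    .build();

  // Messages are immutable; toBuilder() copies the fields into a fresh Builder.
  ResourceProto doubled = resource.toBuilder()
    .setMemory(resource.getMemory() * 2)
    .build();

  System.out.println(doubled.getMemory() + " MB, "
    + doubled.getVirtualCores() + " cores, priority " + priority.getPriority());
 }
}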