Tabnine Logo
GradoopFlinkConfig.getExecutionEnvironment
Code IndexAdd Tabnine to your IDE (free)

How to use
getExecutionEnvironment
method
in
org.gradoop.flink.util.GradoopFlinkConfig

Best Java code snippets using org.gradoop.flink.util.GradoopFlinkConfig.getExecutionEnvironment (Showing top 20 results out of 315)

origin: org.gradoop/gradoop-flink

/**
 * Converts a Gradoop {@link LogicalGraph} into its Flink Gelly representation
 * using the configured vertex and edge mapping functions.
 *
 * @param graph Gradoop graph to convert
 * @return equivalent Gelly graph
 */
public Graph<GradoopId, VV, EV> transformToGelly(LogicalGraph graph) {
 return Graph.fromDataSet(
  graph.getVertices().map(toGellyVertex),
  graph.getEdges().map(toGellyEdge),
  graph.getConfig().getExecutionEnvironment());
}
origin: dbs-leipzig/gradoop

@Override
public LogicalGraph getLogicalGraph() {
 // Parse each JSON line of the input file into a vertex and build a
 // logical graph directly from the resulting vertex set.
 return config.getLogicalGraphFactory().fromDataSets(
  config.getExecutionEnvironment()
   .readTextFile(jsonPath)
   .map(new MinimalJsonToVertex(config.getVertexFactory())));
}
origin: org.gradoop/gradoop-flink

@Override
public DataSet<Tuple3<String, String, String>> readDistributed(String path, GradoopFlinkConfig
 config) {
 // Read the file line-wise, split every row into exactly three tokens and
 // expose them as a typed Tuple3; the explicit type hint is required because
 // Flink cannot infer tuple generics from the lambda alone.
 return config.getExecutionEnvironment()
  .readTextFile(path)
  .map(row -> StringEscaper.split(row, CSVConstants.TOKEN_DELIMITER, 3))
  .map(parts -> Tuple3.of(parts[0], parts[1], parts[2]))
  .returns(new TypeHint<Tuple3<String, String, String>>() { });
}
origin: dbs-leipzig/gradoop

/**
 * Transforms a Gradoop {@link LogicalGraph} into the corresponding Gelly graph.
 * Vertices and edges are converted element-wise by the configured mappers.
 *
 * @param graph Gradoop graph to transform
 * @return Gelly graph built on the same execution environment
 */
public Graph<GradoopId, VV, EV> transformToGelly(LogicalGraph graph) {
 DataSet<Vertex<GradoopId, VV>> mappedVertices = graph.getVertices().map(toGellyVertex);
 DataSet<Edge<GradoopId, EV>> mappedEdges = graph.getEdges().map(toGellyEdge);
 return Graph.fromDataSet(mappedVertices, mappedEdges, graph.getConfig().getExecutionEnvironment());
}
origin: dbs-leipzig/gradoop

@Override
public DataSet<Tuple3<String, String, String>> readDistributed(String path, GradoopFlinkConfig
 config) {
 // Each line is split into exactly three fields which are packed into a
 // Tuple3. The trailing type hint is needed for Flink's lambda type
 // extraction of the generic tuple.
 return config.getExecutionEnvironment()
  .readTextFile(path)
  .map(csvLine -> StringEscaper.split(csvLine, CSVConstants.TOKEN_DELIMITER, 3))
  .map(fields -> Tuple3.of(fields[0], fields[1], fields[2]))
  .returns(new TypeHint<Tuple3<String, String, String>>() { });
}
origin: dbs-leipzig/gradoop

/**
 * Reads the input via a Hadoop {@code TLFInputFormat} and converts each
 * record into a labeled string graph.
 *
 * @return dataset of input graphs
 * @throws IOException if the Hadoop input file cannot be set up
 */
public DataSet<LabeledGraphStringString> getGraphs() throws IOException {
 return getConfig().getExecutionEnvironment()
  .createInput(HadoopInputs.readHadoopFile(
   new TLFInputFormat(), LongWritable.class, Text.class, getFilePath()))
  .map(new DIMSpanGraphFromText());
}
origin: org.gradoop/gradoop-flink

@Override
public GVELayout fromDataSets(DataSet<Vertex> vertices, DataSet<Edge> edges) {
 Objects.requireNonNull(vertices, "Vertex DataSet was null");
 Objects.requireNonNull(edges, "Edge DataSet was null");
 // A fresh graph head identifies the single logical graph of this layout.
 GraphHead head = getConfig()
  .getGraphHeadFactory()
  .createGraphHead();
 // Tag every element with the new graph head id. Forwarded-field hints tell
 // the optimizer which attributes pass through the map unchanged.
 DataSet<Vertex> taggedVertices = vertices
  .map(new AddToGraph<>(head))
  .withForwardedFields("id;label;properties");
 DataSet<Edge> taggedEdges = edges
  .map(new AddToGraph<>(head))
  .withForwardedFields("id;sourceId;targetId;label;properties");
 DataSet<GraphHead> headSet = getConfig().getExecutionEnvironment()
  .fromElements(head);
 return new GVELayout(headSet, taggedVertices, taggedEdges);
}
origin: dbs-leipzig/gradoop

@Override
public DataSet<String> execute(GraphCollection collection) {
 // 1-10. build one canonical string per graph head
 DataSet<GraphHeadString> labels = getGraphHeadStrings(collection);
 // 11. union with a placeholder head so an empty collection still yields a result
 DataSet<GraphHeadString> placeholder = collection
  .getConfig()
  .getExecutionEnvironment()
  .fromElements(new GraphHeadString(GradoopId.get(), ""));
 // 12. concatenate all head strings into the collection label
 return labels
  .union(placeholder)
  .reduceGroup(new ConcatGraphHeadStrings());
}
origin: org.gradoop/gradoop-flink

@Override
public DataSet<String> execute(GraphCollection collection) {
 // 1-10. derive a canonical string for every graph head
 DataSet<GraphHeadString> headStrings = getGraphHeadStrings(collection);
 // 11. an empty placeholder head guarantees a non-empty result for an empty
 //     collection
 DataSet<GraphHeadString> emptyHead = collection
  .getConfig()
  .getExecutionEnvironment()
  .fromElements(new GraphHeadString(GradoopId.get(), ""));
 // 12. fold everything into the final collection label
 return headStrings
  .union(emptyHead)
  .reduceGroup(new ConcatGraphHeadStrings());
}
origin: dbs-leipzig/gradoop

/**
 * Reads the csv file specified by {@link MinimalCSVImporter#path} and converts each valid line
 * to a {@link Vertex}.
 *
 * @param propertyNames list of the property identifier names
 * @param checkReoccurringHeader set to true if each row of the file should be checked for
 *                               reoccurring of the column property names
 * @return a {@link DataSet} of all vertices from one specific file
 */
private DataSet<Vertex> readCSVFile(List<String> propertyNames, boolean checkReoccurringHeader) {
 return config.getExecutionEnvironment()
  // read the raw input line by line
  .readTextFile(path)
  // split each row on tokenSeparator and emit its property values
  // (presumably rows repeating the header are skipped when
  // checkReoccurringHeader is set — confirm in CsvRowToProperties)
  .flatMap(new CsvRowToProperties(tokenSeparator, propertyNames, checkReoccurringHeader))
  // wrap every property set into a vertex built by the configured factory
  .map(new PropertiesToVertex<>(config.getVertexFactory()))
  // explicit return type needed for Flink's type extraction
  .returns(config.getVertexFactory().getType());
}
origin: org.gradoop/gradoop-flink

/**
 * {@inheritDoc}
 */
@Override
public DataSet<Boolean> isEmpty() {
 // TRUE for every (distinct) vertex, unioned with a FALSE seed so the
 // reduce never sees an empty input; OR yields TRUE iff a vertex exists,
 // and the final negation turns that into "is empty".
 DataSet<Boolean> hasVertices = getVertices()
  .map(new True<>())
  .distinct();
 DataSet<Boolean> seed = getConfig().getExecutionEnvironment().fromElements(false);
 return hasVertices
  .union(seed)
  .reduce(new Or())
  .map(new Not());
}
origin: dbs-leipzig/gradoop

/**
 * Checks whether this graph contains no vertices.
 * A constant FALSE element is unioned in so the reduction has input even
 * for a vertex-free graph; OR-ing detects existence, NOT inverts it.
 */
@Override
public DataSet<Boolean> isEmpty() {
 DataSet<Boolean> vertexFlags = getVertices()
  .map(new True<>())
  .distinct();
 DataSet<Boolean> fallback = getConfig().getExecutionEnvironment().fromElements(false);
 return vertexFlags
  .union(fallback)
  .reduce(new Or())
  .map(new Not());
}
origin: dbs-leipzig/gradoop

/**
 * Checks whether this collection contains no graph heads.
 * A constant FALSE element guarantees the reduce receives input even for an
 * empty collection; OR-ing detects existence, NOT inverts it.
 */
@Override
public DataSet<Boolean> isEmpty() {
 DataSet<Boolean> headFlags = getGraphHeads()
  .map(new True<>())
  .distinct();
 DataSet<Boolean> fallback = getConfig().getExecutionEnvironment().fromElements(false);
 return headFlags
  .union(fallback)
  .reduce(new Or())
  .map(new Not());
}
origin: org.gradoop/gradoop-flink

/**
 * {@inheritDoc}
 */
@Override
public DataSet<Boolean> isEmpty() {
 // TRUE per distinct graph head, plus a FALSE seed so the reduce is never
 // empty; OR detects any head, NOT converts existence into emptiness.
 DataSet<Boolean> hasHeads = getGraphHeads()
  .map(new True<>())
  .distinct();
 DataSet<Boolean> seed = getConfig().getExecutionEnvironment().fromElements(false);
 return hasHeads
  .union(seed)
  .reduce(new Or())
  .map(new Not());
}
origin: org.gradoop/gradoop-flink

@Override
public LogicalGraph getLogicalGraph() {
 // Meta data is broadcast to every parser so property types can be resolved
 // while the CSV lines are mapped.
 DataSet<Tuple3<String, String, String>> meta =
  MetaData.fromFile(getMetaDataPath(), getConfig());
 DataSet<Edge> parsedEdges = getConfig().getExecutionEnvironment()
  .readTextFile(getEdgeCSVPath())
  .map(new CSVLineToEdge(getConfig().getEdgeFactory()))
  .withBroadcastSet(meta, BC_METADATA);
 DataSet<Vertex> parsedVertices = getConfig().getExecutionEnvironment()
  .readTextFile(getVertexCSVPath())
  .map(new CSVLineToVertex(getConfig().getVertexFactory()))
  .withBroadcastSet(meta, BC_METADATA);
 return getConfig().getLogicalGraphFactory().fromDataSets(parsedVertices, parsedEdges);
}
origin: dbs-leipzig/gradoop

@Override
public LogicalGraph getLogicalGraph() {
 // The meta data tuple set is broadcast to both element parsers so each
 // mapper can resolve property types for its CSV lines.
 DataSet<Tuple3<String, String, String>> metaDataSet =
  MetaData.fromFile(getMetaDataPath(), getConfig());
 DataSet<Vertex> vertexSet = getConfig().getExecutionEnvironment()
  .readTextFile(getVertexCSVPath())
  .map(new CSVLineToVertex(getConfig().getVertexFactory()))
  .withBroadcastSet(metaDataSet, BC_METADATA);
 DataSet<Edge> edgeSet = getConfig().getExecutionEnvironment()
  .readTextFile(getEdgeCSVPath())
  .map(new CSVLineToEdge(getConfig().getEdgeFactory()))
  .withBroadcastSet(metaDataSet, BC_METADATA);
 return getConfig().getLogicalGraphFactory().fromDataSets(vertexSet, edgeSet);
}
origin: dbs-leipzig/gradoop

/**
 * Returns a collection of all logical graphs contained in the database.
 * Only vertices and edges that belong to at least one logical graph are kept.
 *
 * @return collection of all logical graphs
 */
public GraphCollection getGraphCollection() {
 ExecutionEnvironment env = config.getExecutionEnvironment();
 // drop elements that are not contained in any logical graph
 DataSet<Vertex> containedVertices = env.fromCollection(getVertices())
  .filter(v -> v.getGraphCount() > 0);
 DataSet<Edge> containedEdges = env.fromCollection(getEdges())
  .filter(e -> e.getGraphCount() > 0);
 return config.getGraphCollectionFactory()
  .fromDataSets(env.fromCollection(getGraphHeads()), containedVertices, containedEdges);
}
origin: org.gradoop/gradoop-flink

/**
 * Returns a collection of all logical graphs contained in the database.
 * Vertices and edges without any graph membership are filtered out.
 *
 * @return collection of all logical graphs
 */
public GraphCollection getGraphCollection() {
 ExecutionEnvironment env = config.getExecutionEnvironment();
 // keep only elements that are members of at least one graph
 DataSet<Vertex> memberVertices = env.fromCollection(getVertices())
  .filter(vertex -> vertex.getGraphCount() > 0);
 DataSet<Edge> memberEdges = env.fromCollection(getEdges())
  .filter(edge -> edge.getGraphCount() > 0);
 return config.getGraphCollectionFactory()
  .fromDataSets(env.fromCollection(getGraphHeads()), memberVertices, memberEdges);
}
origin: org.gradoop/gradoop-flink

/**
 * {@inheritDoc}
 *
 * Calls Flink Gelly algorithms to compute the global clustering coefficient for a directed graph.
 */
@Override
protected LogicalGraph executeInternal(Graph<GradoopId, NullValue, NullValue> gellyGraph)
 throws Exception {
 // Run Gelly's DIRECTED global clustering coefficient; the fully qualified
 // class name disambiguates it from the undirected variant.
 GlobalClusteringCoefficient global = new org.apache.flink.graph.library.clustering.directed
  .GlobalClusteringCoefficient<GradoopId, NullValue, NullValue>().run(gellyGraph);
 // Eagerly execute the job: the scalar result is read on the client below,
 // so the computation must finish before getResult() is accessed.
 currentGraph.getConfig().getExecutionEnvironment().execute();
 double globalValue = global.getResult().getGlobalClusteringCoefficientScore();
 // Attach the score as a property on the graph head; vertices and edges of
 // the input graph are passed through unchanged.
 DataSet<GraphHead> resultHead = currentGraph.getGraphHead()
  .map(new WritePropertyToGraphHeadMap(ClusteringCoefficientBase.PROPERTY_KEY_GLOBAL,
   PropertyValue.create(globalValue)));
 return currentGraph.getConfig().getLogicalGraphFactory().fromDataSets(
  resultHead, currentGraph.getVertices(), currentGraph.getEdges());
}
origin: dbs-leipzig/gradoop

@Override
public GraphCollection getGraphCollection() {
 ExecutionEnvironment env = getFlinkConfig().getExecutionEnvironment();
 GraphCollectionFactory collectionFactory = getFlinkConfig().getGraphCollectionFactory();
 // Each element type is read through its own Accumulo input format,
 // restricted by the corresponding query predicate.
 return collectionFactory.fromDataSets(
  // graph heads matching the graph head query
  env.createInput(new GraphHeadInputFormat(
   getStore().getConfig().getAccumuloProperties(), graphHeadQuery)),
  // vertices matching the vertex query
  env.createInput(new VertexInputFormat(
   getStore().getConfig().getAccumuloProperties(), vertexQuery)),
  // edges matching the edge query
  env.createInput(new EdgeInputFormat(
   getStore().getConfig().getAccumuloProperties(), edgeQuery)));
}
org.gradoop.flink.util.GradoopFlinkConfig.getExecutionEnvironment

Javadoc

Returns the Flink execution environment.

Popular methods of GradoopFlinkConfig

  • getGraphCollectionFactory
    Returns a factory that is able to create graph collection layouts.
  • getVertexFactory
  • getEdgeFactory
  • getLogicalGraphFactory
    Returns a factory that is able to create logical graph layouts.
  • createConfig
    Creates a Gradoop Flink configuration using the given parameters.
  • getGraphHeadFactory
  • <init>
    Creates a new Configuration.
  • setGraphCollectionLayoutFactory
    Sets the layout factory for building layouts that represent a GraphCollection.
  • setLogicalGraphLayoutFactory
    Sets the layout factory for building layouts that represent a LogicalGraph.

Popular in Java

  • Creating JSON documents from java classes using gson
  • getExternalFilesDir (Context)
  • getApplicationContext (Context)
  • runOnUiThread (Activity)
  • Thread (java.lang)
    A thread is a thread of execution in a program. The Java Virtual Machine allows an application to ha
  • TimeZone (java.util)
    TimeZone represents a time zone offset, and also figures out daylight savings. Typically, you get a
  • Base64 (org.apache.commons.codec.binary)
    Provides Base64 encoding and decoding as defined by RFC 2045.This class implements section 6.8. Base
  • IOUtils (org.apache.commons.io)
    General IO stream manipulation utilities. This class provides static utility methods for input/outpu
  • Loader (org.hibernate.loader)
    Abstract superclass of object loading (and querying) strategies. This class implements useful common
  • DateTimeFormat (org.joda.time.format)
    Factory that creates instances of DateTimeFormatter from patterns and styles. Datetime formatting i
  • Top 12 Jupyter Notebook extensions
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now