/**
 * Persists the given {@link DataFrame} to the underlying analytics record store (DAL).
 *
 * <p>When {@code this.preserveOrder} is set, each partition is written by its own Spark job,
 * submitted sequentially in partition order, so rows reach the store in partition order.
 * Otherwise all partitions are written in a single {@code foreachPartition} pass, which lets
 * Spark process partitions concurrently.
 *
 * @param data the DataFrame whose rows are written to the record store
 */
private void writeDataFrameToDAL(DataFrame data) {
    // The writer configuration is identical for every partition, so build it once instead
    // of once per loop iteration / branch.
    // NOTE(review): assumes the AnalyticsWritingFunction constructor is side-effect free
    // (it is only a serializable closure here) — confirm before merging.
    AnalyticsWritingFunction writer = new AnalyticsWritingFunction(this.tenantId, this.tableName,
            data.schema(), this.globalTenantAccess, this.schemaString, this.primaryKeys,
            this.mergeFlag, this.recordStore, this.recordBatchSize);
    if (this.preserveOrder) {
        logDebug("Inserting data with order preserved! Each partition will be written using separate jobs.");
        // One runJob call per partition: restricting each job to the single-partition
        // sequence [i, i+1) forces sequential, in-order writes across partitions.
        for (int i = 0; i < data.rdd().partitions().length; i++) {
            data.sqlContext().sparkContext().runJob(data.rdd(), writer,
                    CarbonScalaUtils.getNumberSeq(i, i + 1), false, ClassTag$.MODULE$.Unit());
        }
    } else {
        data.foreachPartition(writer);
    }
}