/**
 * Compares two schemas while ignoring their version number.
 *
 * <p>Two schemas are considered equal when both their schema name and their
 * full field-spec map match.
 *
 * @param schema1 first schema to compare, must not be null
 * @param schema2 second schema to compare, must not be null
 * @return {@code true} if the two schemas are equal ignoring version, {@code false} otherwise
 */
public static boolean equalsIgnoreVersion(@Nonnull Schema schema1, @Nonnull Schema schema2) {
  Preconditions.checkNotNull(schema1);
  Preconditions.checkNotNull(schema2);
  boolean sameName = schema1.getSchemaName().equals(schema2.getSchemaName());
  boolean sameFields = schema1.getFieldSpecMap().equals(schema2.getFieldSpecMap());
  return sameName && sameFields;
}
/**
 * Wraps a {@link Schema} into a {@link ZNRecord} keyed by the schema name,
 * storing the serialized JSON schema in the "schemaJSON" simple field.
 *
 * @param schema the schema to wrap, must not be null
 * @return a ZNRecord carrying the schema's JSON representation
 */
public static ZNRecord toZNRecord(@Nonnull Schema schema) {
  ZNRecord znRecord = new ZNRecord(schema.getSchemaName());
  znRecord.setSimpleField("schemaJSON", schema.getJSONSchema());
  return znRecord;
}
/**
 * Schema APIs.
 *
 * <p>Persists the given schema into the property store, overwriting any
 * existing entry with the same schema name.
 *
 * @param schema the schema to add or update
 */
public void addOrUpdateSchema(Schema schema) {
  String schemaName = schema.getSchemaName();
  ZNRecord schemaRecord = SchemaUtils.toZNRecord(schema);
  PinotHelixPropertyStoreZnRecordProvider provider =
      PinotHelixPropertyStoreZnRecordProvider.forSchema(_propertyStore);
  provider.set(schemaName, schemaRecord);
}
// NOTE(review): fragment of a larger method — builds the JSON metadata payload for a segment.
ObjectNode segmentMetadata = JsonUtils.newObjectNode();
segmentMetadata.put("segmentName", _segmentName);
// Schema may be absent for this segment; emit an explicit null instead of risking an NPE.
segmentMetadata.put("schemaName", _schema != null ? _schema.getSchemaName() : null);
segmentMetadata.put("crc", _crc);
segmentMetadata.put("creationTimeMillis", _creationTime);
// NOTE(review): incomplete fragment — the catch block that binds `e`, the code that actually
// executes the POST, and the closing of the try are outside this view; the LOGGER.error call
// below presumably belongs to that catch block. Verify against the full method.
PostMethod httpPost = new PostMethod(url.toString());
try {
  // The schema payload is sent as a single multipart string part named after the schema.
  Part[] parts = {new StringPart(schema.getSchemaName(), schema.toString())};
  MultipartRequestEntity requestEntity = new MultipartRequestEntity(parts, new HttpMethodParams());
  httpPost.setRequestEntity(requestEntity);
  LOGGER.error("Caught exception while posting the schema: {} to host: {}, port: {}",
      schema.getSchemaName(), host, port, e);
  return false;
/**
 * Deletes the given schema from the property store.
 *
 * @param schema the schema to be deleted; a null schema is treated as a no-op
 * @return {@code true} if the schema existed and was removed, {@code false} otherwise
 */
public boolean deleteSchema(Schema schema) {
  if (schema == null) {
    return false;
  }
  String propertyStorePath = ZKMetadataProvider.constructPropertyStorePathForSchema(schema.getSchemaName());
  if (!_propertyStore.exists(propertyStorePath, AccessOption.PERSISTENT)) {
    return false;
  }
  _propertyStore.remove(propertyStorePath, AccessOption.PERSISTENT);
  return true;
}
/**
 * Uploads the configured schema file to the controller.
 *
 * <p>Resolves the controller host from the local network if none was supplied. In dry-run
 * mode (no {@code -exec} flag) the command is only logged and nothing is uploaded.
 *
 * @return {@code true} on success (including dry runs)
 * @throws FileNotFoundException if the schema file does not exist
 * @throws Exception if host resolution, schema parsing, or the upload fails
 */
@Override
public boolean execute()
    throws Exception {
  if (_controllerHost == null) {
    _controllerHost = NetUtil.getHostAddress();
  }
  if (!_exec) {
    LOGGER.warn("Dry Running Command: " + toString());
    LOGGER.warn("Use the -exec option to actually execute the command.");
    return true;
  }
  File schemaFile = new File(_schemaFile);
  LOGGER.info("Executing command: " + toString());
  if (!schemaFile.exists()) {
    // Fixed garbled message: was "file does not exist, + " + _schemaFile, which rendered
    // as "file does not exist, + /path/to/file".
    throw new FileNotFoundException("file does not exist: " + _schemaFile);
  }
  Schema schema = Schema.fromFile(schemaFile);
  // try-with-resources guarantees the upload client is closed even on failure.
  try (FileUploadDownloadClient fileUploadDownloadClient = new FileUploadDownloadClient()) {
    fileUploadDownloadClient.addSchema(
        FileUploadDownloadClient.getUploadSchemaHttpURI(_controllerHost, Integer.parseInt(_controllerPort)),
        schema.getSchemaName(), schemaFile);
  }
  return true;
}
}
// NOTE(review): garbled fragment — the closing brace of the mismatch branch and the enclosing
// try block are outside this view; as literally written, the notify/return lines would be
// unreachable after the throw, and the trailing `} catch` has no visible try. Verify against
// the full method before relying on this excerpt.
if (schemaName != null && !schema.getSchemaName().equals(schemaName)) {
  // Uploaded schema's internal name must match the name it is being registered under.
  _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_SCHEMA_UPLOAD_ERROR, 1L);
  final String message = "Schema name mismatch for uploaded schema, tried to add schema with name "
      + schema.getSchemaName() + " as " + schemaName;
  throw new ControllerApplicationException(LOGGER, message, Response.Status.BAD_REQUEST);
  _metadataEventNotifierFactory.create().notifyOnSchemaEvents(schema, eventType);
  return new SuccessResponse(schema.getSchemaName() + " successfully added");
} catch (Exception e) {
  _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_SCHEMA_UPLOAD_ERROR, 1L);
// NOTE(review): mid-call fragment — trailing arguments of an invocation whose start is not
// visible here; cannot document the call's purpose from this view alone.
schema.getSchemaName(), TableNameBuilder.extractRawTableName(tableName));
/**
 * Logs a human-readable summary of the given schema: its name, the dimension
 * columns, the metric columns (each flagged when multi-valued), and the time column.
 *
 * @param schema the schema to print
 */
private static void printSchema(Schema schema) {
  LOGGER.info("schemaName: {}", schema.getSchemaName());
  LOGGER.info("Dimension columnNames: ");
  int index = 0;
  for (DimensionFieldSpec dimensionSpec : schema.getDimensionFieldSpecs()) {
    String columnInfo = index + " " + dimensionSpec.getName();
    // Multi-value columns get an explicit suffix in the log line.
    LOGGER.info(dimensionSpec.isSingleValueField() ? columnInfo : columnInfo + " Multi-Value.");
    index++;
  }
  LOGGER.info("Metric columnNames: ");
  index = 0;
  for (MetricFieldSpec metricSpec : schema.getMetricFieldSpecs()) {
    String columnInfo = index + " " + metricSpec.getName();
    LOGGER.info(metricSpec.isSingleValueField() ? columnInfo : columnInfo + " Multi-Value.");
    index++;
  }
  LOGGER.info("Time column: {}", schema.getTimeColumnName());
}
/**
 * Registers the test schema and creates the hybrid (offline + realtime) table used by
 * this integration test, deriving the time column and time type from the shared schema.
 *
 * @param avroFile sample avro data file used when configuring the table
 * @throws Exception if schema registration or table creation fails
 */
protected void setUpTable(File avroFile)
    throws Exception {
  String tableSchemaName = _schema.getSchemaName();
  addSchema(getSchemaFile(), tableSchemaName);
  // The schema must declare a time column with a concrete outgoing time unit.
  String timeColumn = _schema.getTimeColumnName();
  Assert.assertNotNull(timeColumn);
  TimeUnit timeUnit = _schema.getOutgoingTimeUnit();
  Assert.assertNotNull(timeUnit);
  String timeUnitName = timeUnit.toString();
  addHybridTable(getTableName(), useLlc(), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
      KafkaStarterUtils.DEFAULT_ZK_STR, getKafkaTopic(), getRealtimeSegmentFlushSize(), avroFile,
      timeColumn, timeUnitName, tableSchemaName, TENANT_NAME, TENANT_NAME, getLoadMode(),
      getSortedColumn(), getInvertedIndexColumns(), getBloomFilterIndexColumns(), getRawIndexColumns(),
      getTaskConfig(), getStreamConsumerFactoryClassName());
  completeTableConfiguration();
}
/**
 * Registers the test schema and creates a realtime-only table with the given name,
 * clearing any cached offline table config first.
 *
 * @param table name of the realtime table to create
 * @throws Exception if schema registration or table creation fails
 */
private void setupRealtimeTable(String table)
    throws Exception {
  _offlineTableConfig = null;
  File testSchemaFile = getSchemaFile();
  Schema testSchema = Schema.fromFile(testSchemaFile);
  String testSchemaName = testSchema.getSchemaName();
  addSchema(testSchemaFile, testSchemaName);
  // The schema must declare a time column with a concrete outgoing time unit.
  String timeColumn = testSchema.getTimeColumnName();
  Assert.assertNotNull(timeColumn);
  TimeUnit timeUnit = testSchema.getOutgoingTimeUnit();
  Assert.assertNotNull(timeUnit);
  String timeUnitName = timeUnit.toString();
  addRealtimeTable(table, useLlc(), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
      KafkaStarterUtils.DEFAULT_ZK_STR, getKafkaTopic(), getRealtimeSegmentFlushSize(), null,
      timeColumn, timeUnitName, testSchemaName, null, null, getLoadMode(), getSortedColumn(),
      getInvertedIndexColumns(), getBloomFilterIndexColumns(), getRawIndexColumns(), getTaskConfig(),
      getStreamConsumerFactoryClassName());
  completeTableConfiguration();
}
/**
 * Registers the test schema and creates the realtime table used by this integration test,
 * deriving the time column and time type from the schema file.
 *
 * @param avroFile sample avro data file used when configuring the table
 * @throws Exception if schema registration or table creation fails
 */
protected void setUpTable(File avroFile)
    throws Exception {
  File testSchemaFile = getSchemaFile();
  Schema testSchema = Schema.fromFile(testSchemaFile);
  String testSchemaName = testSchema.getSchemaName();
  addSchema(testSchemaFile, testSchemaName);
  // The schema must declare a time column with a concrete outgoing time unit.
  String timeColumn = testSchema.getTimeColumnName();
  Assert.assertNotNull(timeColumn);
  TimeUnit timeUnit = testSchema.getOutgoingTimeUnit();
  Assert.assertNotNull(timeUnit);
  String timeUnitName = timeUnit.toString();
  addRealtimeTable(getTableName(), useLlc(), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
      KafkaStarterUtils.DEFAULT_ZK_STR, getKafkaTopic(), getRealtimeSegmentFlushSize(), avroFile,
      timeColumn, timeUnitName, testSchemaName, null, null, getLoadMode(), getSortedColumn(),
      getInvertedIndexColumns(), getBloomFilterIndexColumns(), getRawIndexColumns(), getTaskConfig(),
      getStreamConsumerFactoryClassName());
  completeTableConfiguration();
}
// NOTE(review): fragment of a larger setup method — registers the schema under its own
// name, then reads the time column; the surrounding method body is outside this view.
String schemaName = schema.getSchemaName();
addSchema(_schemaFile, schemaName);
String timeColumnName = schema.getTimeColumnName();