Refine search
/**
 * Queues an index request for this entity document on the supplied bulk request.
 * The document is written to the current write alias under the shared entity type.
 *
 * @param client      Elasticsearch client used to build the index request
 * @param bulkRequest bulk request the new index operation is appended to
 */
public void doOperation( final Client client, final BulkRequestBuilder bulkRequest ) {
    final IndexRequestBuilder indexRequest =
        client.prepareIndex( writeAlias, IndexingUtils.ES_ENTITY_TYPE, documentId )
              .setSource( data );
    bulkRequest.add( indexRequest );
}
/**
 * Persists indexing statistics for this run as a single document in the
 * configured statistics index, when statistics storage is enabled.
 *
 * @param duration elapsed time of the indexing run, stored verbatim
 */
private void logStatistics(long duration) {
    // Skip entirely unless the river definition asks for statistics.
    if (!definition.isStoreStatistics()) {
        return;
    }
    long totalDocuments = deletedDocuments.get() + insertedDocuments.get();
    logger.trace("Indexed {} documents: {} insertions, {} updates, {} deletions", totalDocuments, insertedDocuments.get(), updatedDocuments.get(), deletedDocuments.get());

    Map<String, Object> statistics = new HashMap<String, Object>();
    statistics.put("duration", duration);
    statistics.put("date", new Date());
    statistics.put("index", index);
    statistics.put("type", type);
    statistics.put("documents.inserted", insertedDocuments.get());
    statistics.put("documents.updated", updatedDocuments.get());
    statistics.put("documents.deleted", deletedDocuments.get());
    statistics.put("documents.total", documentCount.get());

    // Wrap under a single "statistics" root field, then index synchronously.
    Map<String, Object> source = new HashMap<String, Object>();
    source.put("statistics", statistics);
    client.prepareIndex(definition.getStatisticsIndexName(), definition.getStatisticsTypeName()).setSource(source).get();
}
}
/**
 * Writes the river's status document ({@code _river/<riverName>/<STATUS_ID>})
 * so other components can observe the new state. Failures to build the JSON
 * body are logged, not rethrown.
 *
 * @param client    client used to index the status document
 * @param riverName name of the river whose status is updated
 * @param status    new status value to store
 */
public static void setRiverStatus(Client client, String riverName, Status status) {
    logger.info("setRiverStatus called with {} - {}", riverName, status);
    try {
        XContentBuilder statusDoc = jsonBuilder()
                .startObject()
                    .startObject(MongoDBRiver.TYPE)
                        .field(MongoDBRiver.STATUS_FIELD, status)
                    .endObject()
                .endObject();
        client.prepareIndex("_river", riverName, MongoDBRiver.STATUS_ID).setSource(statusDoc).get();
    } catch (IOException ioEx) {
        // ES-style logger signature: (message, throwable, args).
        logger.error("setRiverStatus failed for river {}", ioEx, riverName);
    }
}
/**
 * Insert a record in the database. Any field/value pairs in the specified
 * values HashMap will be written into the record with the specified record
 * key.
 *
 * @param table
 *          The name of the table
 * @param key
 *          The record key of the record to insert.
 * @param values
 *          A HashMap of field/value pairs to insert in the record
 * @return Zero on success, a non-zero error code on error. See this class's
 *         description for a discussion of error codes.
 */
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
    try {
        // Build the document body field-by-field from the string view of the values.
        final XContentBuilder doc = jsonBuilder().startObject();
        final Map<String, String> stringValues = StringByteIterator.getStringMap(values);
        for (Map.Entry<String, String> field : stringValues.entrySet()) {
            doc.field(field.getKey(), field.getValue());
        }
        doc.endObject();
        // Index synchronously under <indexKey>/<table>/<key>.
        client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet();
        return Status.OK;
    } catch (Exception e) {
        e.printStackTrace();
        return Status.ERROR;
    }
}
@Override
public void process(final InputStream in) throws IOException {
    // Collapse every flavour of line break so the payload is single-line JSON.
    String json = IOUtils.toString(in, charset)
            .replace("\r\n", " ").replace('\n', ' ').replace('\r', ' ');
    final byte[] payload = json.getBytes(charset);
    // Dispatch on the configured operation (case-insensitive);
    // an unrecognised value is a hard error.
    if (indexOp.equalsIgnoreCase("index")) {
        bulk.add(esClient.get().prepareIndex(index, docType, id)
                .setSource(payload));
    } else if (indexOp.equalsIgnoreCase("upsert")) {
        bulk.add(esClient.get().prepareUpdate(index, docType, id)
                .setDoc(payload)
                .setDocAsUpsert(true));
    } else if (indexOp.equalsIgnoreCase("update")) {
        bulk.add(esClient.get().prepareUpdate(index, docType, id)
                .setDoc(payload));
    } else {
        throw new IOException("Index operation: " + indexOp + " not supported.");
    }
}
});
// Re-index the previously fetched document: write its _source back under the
// same <indexKey>/<table>/<key> coordinates, blocking until the call completes.
client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet();
// Index jsonMap with explicit version handling: when the caller supplies no
// version, use version 1 with VersionType.FORCE (overwrite regardless of the
// stored version); otherwise honour the caller's version with EXTERNAL semantics.
// NOTE(review): the builder chain continues past this chunk (execute/get not visible here).
IndexResponse r = elasticsearchClient.prepareIndex(indexName, typeName, id).setSource(jsonMap) .setVersion(version == null ? 1 : version.longValue()) .setVersionType(version == null ? VersionType.FORCE : VersionType.EXTERNAL)
/**
 * Converts each row of query results into a JSON document and indexes it
 * under {@code <tableName>/doc}. Column types are resolved lazily from the
 * first status payload that carries column metadata.
 */
@Override
public void addResults(QueryStatusInfo statusInfo, QueryData data)
{
    // Capture type information once, as soon as columns become available.
    if (types.get() == null && statusInfo.getColumns() != null) {
        types.set(getTypes(statusInfo.getColumns()));
    }
    if (data.getData() == null) {
        return;
    }
    checkState(types.get() != null, "Type information is missing");

    List<Column> columns = statusInfo.getColumns();
    for (List<Object> row : data.getData()) {
        try {
            XContentBuilder document = jsonBuilder().startObject();
            for (int col = 0; col < row.size(); col++) {
                Type columnType = types.get().get(col);
                document.field(columns.get(col).getName(), convertValue(row.get(col), columnType));
            }
            document.endObject();
            // Synchronous per-row index call.
            client.prepareIndex(tableName, "doc")
                    .setSource(document.string(), JSON)
                    .get();
        }
        catch (IOException e) {
            throw new UncheckedIOException("Error loading data into Elasticsearch index: " + tableName, e);
        }
    }
}
/** * Tests writing a document to a new index to ensure it's working correctly. See this post: * http://s.apache.org/index-missing-exception */ private void testNewIndex() { // create the document, this ensures the index is ready // Immediately create a document and remove it to ensure the entire cluster is ready // to receive documents. Occasionally we see errors. // See this post: http://s.apache.org/index-missing-exception if (logger.isTraceEnabled()) { logger.trace("Testing new index name: read {} write {}", alias.getReadAlias(), alias.getWriteAlias()); } final RetryOperation retryOperation = () -> { final String tempId = UUIDGenerator.newTimeUUID().toString(); esProvider.getClient().prepareIndex( alias.getWriteAlias(), VERIFY_TYPE, tempId ) .setSource(DEFAULT_PAYLOAD).get(); if (logger.isTraceEnabled()) { logger.trace("Successfully created new document with docId {} in index read {} write {} and type {}", tempId, alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE); } // delete all types, this way if we miss one it will get cleaned up esProvider.getClient().prepareDelete( alias.getWriteAlias(), VERIFY_TYPE, tempId).get(); if (logger.isTraceEnabled()) { logger.trace("Successfully deleted documents in read {} write {} and type {} with id {}", alias.getReadAlias(), alias.getWriteAlias(), VERIFY_TYPE, tempId); } return true; }; doInRetry(retryOperation); }
/**
 * Maps the incoming tuple to an Elasticsearch index request, fires it
 * asynchronously, and acks the tuple; any mapping or client failure causes
 * the tuple to be failed for replay.
 */
@Override
public void execute(Tuple tuple) {
    try {
        String index = mapper.getIndex(tuple);
        String type = mapper.getType(tuple);
        String id = mapper.getId(tuple);
        String source = mapper.getSource(tuple);
        OpType opType = mapper.getOpType();
        client.prepareIndex(index, type)
                .setId(id)
                .setSource(source)
                .setOpType(opType)
                .execute();
        collector.ack(tuple);
    } catch (Exception e) {
        // NOTE(review): the exception is dropped here — only the tuple is
        // failed. Consider collector.reportError(e) so the topology can see
        // the root cause; confirm against the surrounding bolt's conventions.
        collector.fail(tuple);
    }
}
/**
 * Adds one Flume event to the pending bulk request, creating the bulk
 * request lazily on first use. The index request comes either from the
 * configured factory or, absent one, from the serializer's content builder.
 *
 * @param event            event to index
 * @param indexNameBuilder resolves the target index name/prefix for the event
 * @param indexType        Elasticsearch type to index under
 * @param ttlMs            document TTL in milliseconds; applied only when positive
 */
@Override
public void addEvent(Event event, IndexNameBuilder indexNameBuilder,
    String indexType, long ttlMs) throws Exception {
  if (bulkRequestBuilder == null) {
    bulkRequestBuilder = client.prepareBulk();
  }

  final IndexRequestBuilder indexRequestBuilder;
  if (indexRequestBuilderFactory != null) {
    indexRequestBuilder = indexRequestBuilderFactory.createIndexRequest(
        client, indexNameBuilder.getIndexPrefix(event), indexType, event);
  } else {
    indexRequestBuilder = client
        .prepareIndex(indexNameBuilder.getIndexName(event), indexType)
        .setSource(serializer.getContentBuilder(event).bytes());
  }

  if (ttlMs > 0) {
    indexRequestBuilder.setTTL(ttlMs);
  }
  bulkRequestBuilder.add(indexRequestBuilder);
}
// Loop-body fragment: skip entries without an id, then queue an index request
// for the entry. A missing version means "force version 1" (FORCE overwrites
// unconditionally); an explicit version uses EXTERNAL version semantics.
if (be.getId() == null) continue; bulkRequest.add( elasticsearchClient.prepareIndex(indexName, be.getType(), be.getId()).setSource(be.getJsonMap()) .setVersion(be.getVersion() == null ? 1 : be.getVersion().longValue()) .setVersionType(be.getVersion() == null ? VersionType.FORCE : VersionType.EXTERNAL));
/**
 * Queues one document on the shared bulk request, bumps the batch counter,
 * and delegates to {@code flushIndex(false)} which flushes only when the
 * batch threshold is reached.
 *
 * @param id   document id
 * @param json document body as a field/value map
 */
private void addToIndex(String id, Map<String, Object> json) {
    this.bulkRequest.add(
            this.client.prepareIndex(this.indexName, this.indexType, id).setSource(json));
    this.bulkRequestCounter++;
    this.flushIndex(false);
}
/**
 * Queues a new place document on the bulk request, keyed by its place id.
 * Conversion failures are logged and the document is skipped.
 *
 * @param doc the Photon document to index
 */
public void create(PhotonDoc doc) {
    try {
        this.bulkRequest.add(this.esClient.prepareIndex("photon", "place")
                .setSource(Utils.convert(doc, this.languages))
                .setId(String.valueOf(doc.getPlaceId())));
    } catch (IOException e) {
        log.error(String.format("creation of new doc [%s] failed", doc), e);
    }
}
/**
 * Write a json document into the search index.
 * Writing using a XContentBuilder is the most efficient way to add content to elasticsearch.
 *
 * @param indexName
 *            the name of the index
 * @param json
 *            the json document to be indexed in elasticsearch
 * @param id
 *            the unique identifier of a document
 * @param typeName
 *            the type of the index
 * @param version
 *            the document version to set on the index request
 * @param versionType
 *            how the given version is interpreted (e.g. EXTERNAL, FORCE)
 * @return the response of the blocking index call
 */
public IndexResponse writeSource(String indexName, XContentBuilder json, String id,
        String typeName, long version, VersionType versionType) {
    // put this to the index
    IndexResponse r = elasticsearchClient.prepareIndex(indexName, typeName, id).setSource(json)
            .setVersion(version).setVersionType(versionType).execute()
            .actionGet();
    // documentation about the versioning is available at
    // https://www.elastic.co/blog/elasticsearch-versioning-support
    // TODO: error handling
    return r;
}
/**
 * Queues a Photon document on the bulk request and periodically persists the
 * accumulated batch (every 10000 documents). Conversion failures are logged
 * and the document is skipped without affecting the counter.
 *
 * @param doc the Photon document to index
 */
@Override
public void add(PhotonDoc doc) {
    try {
        this.bulkRequest.add(this.esClient.prepareIndex(indexName, indexType)
                .setSource(Utils.convert(doc, languages))
                .setId(doc.getUid()));
    } catch (IOException e) {
        log.error("could not bulk add document " + doc.getUid(), e);
        return;
    }
    this.documentCount += 1;
    // Flush a full batch every 10000 successfully queued documents.
    if (this.documentCount > 0 && this.documentCount % 10000 == 0) {
        this.saveDocuments();
    }
}
// Fragment: index the entity's JSON under <schema-lowercased>/<SimpleClassName>/<id>,
// fire asynchronously, then block on the future for the response.
// NOTE(review): the receiver of .prepareIndex(...) is outside this chunk.
.prepareIndex(metadata.getSchema().toLowerCase(), entityClazz.getSimpleName(), id.toString()) .setSource(json).execute(); IndexResponse response = listenableActionFuture.actionGet();
// Loop-body fragment: skip entries without an id, then queue an index request
// pinned to version 1 with create disabled so the write always behaves as a
// plain OpType.INDEX (overwrite) operation.
// NOTE(review): the builder chain continues past this chunk.
if (be.id == null) continue; bulkRequest.add( elasticsearchClient.prepareIndex(indexName, be.type, be.id).setSource(be.jsonMap) .setVersion(1) .setCreate(false) // enforces OpType.INDEX
/**
 * Serializes the task result to XContent and stores it in the task index,
 * delegating the actual (retried) write to the backoff-aware overload.
 * Serialization failures are wrapped in an {@link ElasticsearchException}.
 *
 * @param taskResult result to persist, keyed by its task id
 * @param listener   notified when the store attempt completes
 */
private void doStoreResult(TaskResult taskResult, ActionListener<Void> listener) {
    final IndexRequestBuilder request =
            client.prepareIndex(TASK_INDEX, TASK_TYPE, taskResult.getTask().getTaskId().toString());
    try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) {
        taskResult.toXContent(builder, ToXContent.EMPTY_PARAMS);
        request.setSource(builder);
    } catch (IOException e) {
        throw new ElasticsearchException("Couldn't convert task result to XContent for [{}]", e, taskResult.getTask());
    }
    doStoreResult(STORE_BACKOFF_POLICY.iterator(), request, listener);
}
/**
 * Asynchronously indexes one audit message into the security configuration
 * index under the "audit" type; success is traced, failure logged.
 *
 * @param msg audit message whose auditInfo map becomes the document source
 */
protected void index(final AuditMessage msg) {
    client.prepareIndex(securityConfigurationIndex, "audit")
            .setSource(msg.auditInfo)
            .execute(new ActionListener<IndexResponse>() {

                @Override
                public void onResponse(final IndexResponse response) {
                    log.trace("write audit message {}", msg);
                }

                @Override
                public void onFailure(final Throwable e) {
                    log.error("Unable to write audit log due to {}", e, e.toString());
                }
            });
}