/**
 * Builds a display label for a merged attribute by suffixing the source repository's name in
 * parentheses to the attribute's original label.
 *
 * @param repository the repository the attribute originates from
 * @param attributeLabel the attribute's label in the original repository
 * @return label of the form {@code <attributeLabel>(<repositoryName>)}
 */
private String getMergedAttributeLabel(Repository repository, String attributeLabel) {
  StringBuilder mergedLabel = new StringBuilder(attributeLabel);
  mergedLabel.append('(').append(repository.getName()).append(')');
  return mergedLabel.toString();
}
/**
 * Builds a unique name for a merged attribute by prefixing the attribute's original name with
 * the source repository's name, separated by an underscore.
 *
 * @param repository the repository the attribute originates from
 * @param attributeName the attribute's name in the original repository
 * @return name of the form {@code <repositoryName>_<attributeName>}
 */
private String getMergedAttributeName(Repository repository, String attributeName) {
  return String.join("_", repository.getName(), attributeName);
}
/**
 * Create a new merged repository. Metadata is merged based on the common attributes (those remain
 * at root level); all non-common attributes are organised in one compound attribute per
 * repository. Data of all repositories is merged based on the common columns.
 *
 * @param repositoryList list of repositories to be merged
 * @param commonAttributes list of common attributes, these columns are used to 'join'/'merge' on
 * @param mergedRepository the resulting repository
 * @param batchSize number of records after which the result is added or updated in the repository
 * @return mergedRepository ElasticSearchRepository containing the merged data
 */
public Repository merge(
    List<Repository> repositoryList,
    List<AttributeMetaData> commonAttributes,
    Repository mergedRepository,
    int batchSize) {
  // Resolve the live repository by name so writes go through the data service layer
  Repository target = dataService.getRepository(mergedRepository.getName());
  mergeData(repositoryList, target, commonAttributes, batchSize);
  return mergedRepository;
}
// NOTE(review): fragment — enclosing method not visible. Uses the repository's name as the
// entity type id; assumes repository name and entity type id are interchangeable — TODO confirm.
entityTypeId = repository.getName();
private void createInputRepository(Repository<Entity> inputRepository) { // Add the original input dataset to database dataService.getMeta().addEntityType(inputRepository.getEntityType()); Repository<Entity> target = dataService.getRepository(inputRepository.getName()); inputRepository.forEachBatched(entities -> target.add(entities.stream()), BATCH_SIZE); }
// NOTE(review): disjoint fragments from at least three call sites — not a compilable unit.
// Fragment 1: permission check — deny unless the user may read the source repository's data.
new EntityTypeIdentity(repositoryToCopyFrom.getName()), EntityTypePermission.READ_DATA);
if (!readPermission)
  throw new EntityTypePermissionDeniedException(EntityTypePermission.READ_DATA, entityTypeId);
// Fragment 2: capability check — the source repository must be WRITABLE (presumably because the
// copy target is created through the same backend — TODO confirm against surrounding code).
    .getCapabilities(repositoryToCopyFrom.getName())
    .contains(RepositoryCapability.WRITABLE);
if (!writableCapabilities) {
  throw new RepositoryNotCapableException(
      repositoryToCopyFrom.getName(), RepositoryCapability.WRITABLE);
// Fragment 3: REST response — set the Location header to the new entity's meta href,
// respond 201 Created, and return the new repository's name.
"Location", Href.concatMetaEntityHrefV2(RestControllerV2.BASE_URI, repository.getName()));
response.setStatus(HttpServletResponse.SC_CREATED);
return repository.getName();
/**
 * Builds a query-result cache for the given repository: a size-bounded, access-expiring cache
 * mapping a {@link Query} to the list of matching entity identifiers.
 *
 * @param repository repository whose query results are cached
 * @return loading cache keyed by query, holding matching entity ids
 */
private LoadingCache<Query<Entity>, List<Object>> createQueryCache(
    Repository<Entity> repository) {
  LOG.trace("Creating Query cache for repository {}", repository.getName());
  CacheLoader<Query<Entity>, List<Object>> loader = createCacheLoader(repository);
  Caffeine<Object, Object> cacheBuilder =
      Caffeine.newBuilder()
          .recordStats()
          .maximumSize(MAX_CACHE_SIZE_PER_QUERY)
          .expireAfterAccess(10, MINUTES);
  LoadingCache<Query<Entity>, List<Object>> queryCache =
      CaffeinatedGuava.build(cacheBuilder, loader);
  // Expose cache hit/miss statistics under the "l3.<entityTypeId>" metric namespace
  GuavaCacheMetrics.monitor(
      meterRegistry, queryCache, "l3." + repository.getEntityType().getId());
  return queryCache;
}
/**
 * Creates a cache loader that, for a given query, fetches the identifiers of all matching
 * entities from the repository and stores them together with their query.
 *
 * @param repository the repository to load entity identifiers from
 * @return the {@link CacheLoader}
 */
private CacheLoader<Query<Entity>, List<Object>> createCacheLoader(
    final Repository<Entity> repository) {
  final String repoName = repository.getName();
  // Restrict fetching to the id attribute only; full entities are not needed for the cache
  final Fetch idOnlyFetch =
      new Fetch().field(repository.getEntityType().getIdAttribute().getName());
  return new CacheLoader<Query<Entity>, List<Object>>() {
    /**
     * Loads {@link Entity} identifiers for a {@link Query}.
     *
     * @param query the cache key to load
     * @return {@link List} of identifier {@link Object}s
     */
    @Override
    public List<Object> load(@Nonnull Query<Entity> query) {
      LOG.trace("Loading identifiers from repository {} for query {}", repoName, query);
      Query<Entity> idQuery = new QueryImpl<>(query).fetch(idOnlyFetch);
      return repository.findAll(idQuery).map(Entity::getIdValue).collect(toList());
    }
  };
}
// NOTE(review): disjoint fragments — sheet-name handling for import constraint violations.
// Strip the internal dataset-sheet prefix when present so messages show the user-facing name.
// (As written, a declaration as the sole body of an `if` is not valid Java — fragment only.)
if (repository.getName().startsWith(DATASET_SHEET_PREFIX))
  String identifier = repository.getName().substring(DATASET_SHEET_PREFIX.length());
// Attach sheet (and, when the row number is known, row) context to the violation.
// Row is reported 1-based for users, hence the + 1 on the 0-based getRownr().
violation.setImportInfo(
    String.format("Sheet: '%s', row: %d", repository.getName(), violation.getRownr() + 1));
violation.setImportInfo(String.format("Sheet: '%s'", repository.getName()));
// NOTE(review): fragment — builds a COMPOUND attribute named after the source repository;
// presumably the per-repository non-common attributes are collected into attributeParts and
// nested under this compound — TODO confirm against the enclosing merge logic.
DefaultAttributeMetaData repositoryCompoundAttribute =
    new DefaultAttributeMetaData(repository.getName(), MolgenisFieldTypes.FieldTypeEnum.COMPOUND);
List<AttributeMetaData> attributeParts = new ArrayList<>();
/**
 * Creates and configures a {@link SortaJobExecution} for matching the given input data against an
 * ontology, sets up the backing input and (empty) result repositories, and grants the current
 * user write-meta permissions on both entity types.
 *
 * <p>Fix: removed the unused local {@code User currentUser = userAccountService.getCurrentUser()}
 * — its value was never read.
 *
 * @param inputData repository holding the terms to be matched
 * @param jobName display name for the job
 * @param ontologyIri IRI of the ontology to match against
 * @return the configured job execution (not yet persisted or started here)
 */
private SortaJobExecution createJobExecution(
    Repository<Entity> inputData, String jobName, String ontologyIri) {
  String resultEntityName = idGenerator.generateId();

  SortaJobExecution sortaJobExecution = sortaJobExecutionFactory.create();
  sortaJobExecution.setIdentifier(resultEntityName);
  sortaJobExecution.setName(jobName);
  sortaJobExecution.setSourceEntityName(inputData.getName());
  sortaJobExecution.setResultEntityName(resultEntityName);
  sortaJobExecution.setDeleteUrl(getSortaServiceMenuUrl() + "/delete/" + resultEntityName);
  sortaJobExecution.setThreshold(DEFAULT_THRESHOLD);
  sortaJobExecution.setOntologyIri(ontologyIri);

  // Repository creation requires elevated privileges; run as the system user
  RunAsSystemAspect.runAsSystem(
      () -> {
        createInputRepository(inputData);
        createEmptyResultRepository(jobName, resultEntityName, inputData.getEntityType());
      });

  // Grant the current user write-meta permissions on both the input and the result entity type
  EntityType resultEntityType = entityTypeFactory.create(resultEntityName);
  permissionSystemService.giveUserWriteMetaPermissions(
      asList(inputData.getEntityType(), resultEntityType));
  return sortaJobExecution;
}