/**
 * Returns the cached {@code BlockletDataMapIndexWrapper} for the given segment
 * identifier, or {@code null} if no entry is present in the LRU cache.
 *
 * @param tableSegmentUniqueIdentifierWrapper wrapper holding the block index identifier
 * @return the cached wrapper, or {@code null} when absent
 */
@Override public BlockletDataMapIndexWrapper getIfPresent(
    TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) {
  String lruCacheKey = tableSegmentUniqueIdentifierWrapper
      .getTableBlockIndexUniqueIdentifier().getUniqueTableSegmentIdentifier();
  return (BlockletDataMapIndexWrapper) lruCache.get(lruCacheKey);
}
/**
 * Clears the access count of the given table segments in the LRU cache.
 *
 * Fix: {@code lruCache.get(...)} returns {@code null} for a key that is absent
 * (e.g. the entry was evicted or never added); the original code dereferenced
 * the result unconditionally and could throw a NullPointerException. Absent
 * entries are now skipped.
 *
 * @param tableSegmentUniqueIdentifiersWrapper identifiers of the segments whose
 *        access count should be cleared
 */
@Override public void clearAccessCount(
    List<TableBlockIndexUniqueIdentifierWrapper> tableSegmentUniqueIdentifiersWrapper) {
  for (TableBlockIndexUniqueIdentifierWrapper identifierWrapper :
      tableSegmentUniqueIdentifiersWrapper) {
    BlockDataMap cacheable = (BlockDataMap) lruCache.get(
        identifierWrapper.getTableBlockIndexUniqueIdentifier()
            .getUniqueTableSegmentIdentifier());
    // entry may no longer be in the cache; guard against NPE
    if (null != cacheable) {
      cacheable.clear();
    }
  }
}
}
/** * method invalidate the segment cache for segment * * @param tableSegmentUniqueIdentifierWrapper */ @Override public void invalidate( TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) { BlockletDataMapIndexWrapper blockletDataMapIndexWrapper = getIfPresent(tableSegmentUniqueIdentifierWrapper); if (null != blockletDataMapIndexWrapper) { // clear the segmentProperties cache List<BlockDataMap> dataMaps = blockletDataMapIndexWrapper.getDataMaps(); if (null != dataMaps && !dataMaps.isEmpty()) { String segmentId = tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier().getSegmentId(); // as segmentId will be same for all the dataMaps and segmentProperties cache is // maintained at segment level so it need to be called only once for clearing SegmentPropertiesAndSchemaHolder.getInstance() .invalidate(segmentId, dataMaps.get(0).getSegmentPropertiesIndex(), tableSegmentUniqueIdentifierWrapper.isAddTableBlockToUnsafeAndLRUCache()); } } lruCache.remove(tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier() .getUniqueTableSegmentIdentifier()); }
@Override public void put(TableBlockIndexUniqueIdentifierWrapper tableBlockIndexUniqueIdentifierWrapper, BlockletDataMapIndexWrapper wrapper) throws IOException, MemoryException { // As dataMap will use unsafe memory, it is not recommended to overwrite an existing entry // as in that case clearing unsafe memory need to be taken card. If at all datamap entry // in the cache need to be overwritten then use the invalidate interface // and then use the put interface if (null == getIfPresent(tableBlockIndexUniqueIdentifierWrapper)) { List<BlockDataMap> dataMaps = wrapper.getDataMaps(); try { for (BlockDataMap blockletDataMap : dataMaps) { blockletDataMap.convertToUnsafeDMStore(); } // Locking is not required here because in LRU cache map add method is synchronized to add // only one entry at a time and if a key already exists it will not overwrite the entry lruCache.put(tableBlockIndexUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier() .getUniqueTableSegmentIdentifier(), wrapper, wrapper.getMemorySize()); } catch (Throwable e) { // clear all the memory acquired by data map in case of any failure for (DataMap blockletDataMap : dataMaps) { blockletDataMap.clear(); } throw new IOException("Problem in adding datamap to cache.", e); } } }
private BlockletDataMapIndexWrapper get(TableBlockIndexUniqueIdentifierWrapper identifierWrapper, Map<String, Map<String, BlockMetaInfo>> segInfoCache) throws IOException { TableBlockIndexUniqueIdentifier identifier = identifierWrapper.getTableBlockIndexUniqueIdentifier(); String lruCacheKey = identifier.getUniqueTableSegmentIdentifier(); BlockletDataMapIndexWrapper blockletDataMapIndexWrapper =
boolean isTransactionalTable = true; TableBlockIndexUniqueIdentifier identifier = identifierWrapper.getTableBlockIndexUniqueIdentifier(); List<ColumnSchema> tableColumnList = null; if (identifier.getMergeIndexFileName() != null