/**
 * Returns the {@link BlockletDataMapIndexWrapper} cached for the given identifier,
 * or null when no entry is present in the LRU cache.
 *
 * @param tableSegmentUniqueIdentifierWrapper wrapper holding the segment's unique identifier
 * @return the cached wrapper, or null if absent
 */
@Override public BlockletDataMapIndexWrapper getIfPresent(
    TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) {
  String cacheKey = tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier()
      .getUniqueTableSegmentIdentifier();
  return (BlockletDataMapIndexWrapper) lruCache.get(cacheKey);
}
/**
 * Clears the access count of the given table segments in the LRU cache.
 *
 * @param tableSegmentUniqueIdentifiersWrapper identifiers of the segments whose
 *        access count should be cleared
 */
@Override public void clearAccessCount(
    List<TableBlockIndexUniqueIdentifierWrapper> tableSegmentUniqueIdentifiersWrapper) {
  for (TableBlockIndexUniqueIdentifierWrapper identifierWrapper :
      tableSegmentUniqueIdentifiersWrapper) {
    BlockDataMap cacheable = (BlockDataMap) lruCache.get(
        identifierWrapper.getTableBlockIndexUniqueIdentifier().getUniqueTableSegmentIdentifier());
    // The entry may have been evicted from the LRU cache (or never added); the original
    // code dereferenced the lookup result unconditionally and would NPE in that case.
    if (null != cacheable) {
      cacheable.clear();
    }
  }
}
}
/**
 * Adds a BlockletDataMapIndexWrapper to the LRU cache for the given identifier.
 * An existing entry is never overwritten; to replace an entry, call invalidate first
 * and then put.
 *
 * @param tableBlockIndexUniqueIdentifierWrapper key wrapper identifying the cache entry
 * @param wrapper the datamap wrapper to cache
 * @throws IOException if adding the datamap to the cache fails
 * @throws MemoryException if converting the datamaps to the unsafe store fails
 */
@Override public void put(TableBlockIndexUniqueIdentifierWrapper tableBlockIndexUniqueIdentifierWrapper,
    BlockletDataMapIndexWrapper wrapper) throws IOException, MemoryException {
  // As dataMap will use unsafe memory, it is not recommended to overwrite an existing entry
  // as in that case clearing the unsafe memory needs to be taken care of. If at all a datamap
  // entry in the cache needs to be overwritten then use the invalidate interface
  // and then use the put interface
  if (null == getIfPresent(tableBlockIndexUniqueIdentifierWrapper)) {
    List<BlockDataMap> dataMaps = wrapper.getDataMaps();
    try {
      // convert each datamap to the unsafe store before caching, so memory size is final
      for (BlockDataMap blockletDataMap : dataMaps) {
        blockletDataMap.convertToUnsafeDMStore();
      }
      // Locking is not required here because in LRU cache map add method is synchronized to add
      // only one entry at a time and if a key already exists it will not overwrite the entry
      lruCache.put(tableBlockIndexUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier()
          .getUniqueTableSegmentIdentifier(), wrapper, wrapper.getMemorySize());
    } catch (Throwable e) {
      // clear all the memory acquired by data map in case of any failure
      for (DataMap blockletDataMap : dataMaps) {
        blockletDataMap.clear();
      }
      // preserve the original cause so the failure is diagnosable
      throw new IOException("Problem in adding datamap to cache.", e);
    }
  }
}
/** * method invalidate the segment cache for segment * * @param tableSegmentUniqueIdentifierWrapper */ @Override public void invalidate( TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) { BlockletDataMapIndexWrapper blockletDataMapIndexWrapper = getIfPresent(tableSegmentUniqueIdentifierWrapper); if (null != blockletDataMapIndexWrapper) { // clear the segmentProperties cache List<BlockDataMap> dataMaps = blockletDataMapIndexWrapper.getDataMaps(); if (null != dataMaps && !dataMaps.isEmpty()) { String segmentId = tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier().getSegmentId(); // as segmentId will be same for all the dataMaps and segmentProperties cache is // maintained at segment level so it need to be called only once for clearing SegmentPropertiesAndSchemaHolder.getInstance() .invalidate(segmentId, dataMaps.get(0).getSegmentPropertiesIndex(), tableSegmentUniqueIdentifierWrapper.isAddTableBlockToUnsafeAndLRUCache()); } } lruCache.remove(tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier() .getUniqueTableSegmentIdentifier()); }
// NOTE(review): fragment of a larger method whose signature is outside this view;
// `identifierWrapper` comes from the enclosing (unseen) scope.
// Looks up the wrapper currently cached for this identifier and re-puts it with its
// current memory size — presumably to refresh the LRU entry's size accounting; confirm
// against the enclosing method.
TableBlockIndexUniqueIdentifier identifier =
    identifierWrapper.getTableBlockIndexUniqueIdentifier();
String lruCacheKey = identifier.getUniqueTableSegmentIdentifier();
BlockletDataMapIndexWrapper blockletDataMapIndexWrapper =
    (BlockletDataMapIndexWrapper) lruCache.get(lruCacheKey);
// NOTE(review): lruCache.get may return null if the entry was evicted; the put below
// would then NPE on getMemorySize() — verify callers guarantee the entry is present.
lruCache.put(identifier.getUniqueTableSegmentIdentifier(), blockletDataMapIndexWrapper,
    blockletDataMapIndexWrapper.getMemorySize());
/**
 * Loads and initializes a BlockDataMap from a single segment index file.
 * Creation is synchronized on a per-segment lock object so that the datamap for a
 * given segment is built by only one thread at a time.
 *
 * @param identifier identifies the segment index file to load
 * @param indexFileStore provides the raw bytes of the index file
 * @param blockMetaInfoMap block meta info for the blocks covered by the index file
 * @param carbonTable table the segment belongs to
 * @param addTableBlockToUnsafe whether the block info should be kept in unsafe memory
 * @param configuration configuration used while initializing the datamap
 * @return the initialized BlockDataMap
 * @throws IOException if reading the index file fails
 * @throws MemoryException if unsafe memory allocation fails
 */
private BlockDataMap loadAndGetDataMap(TableBlockIndexUniqueIdentifier identifier,
    SegmentIndexFileStore indexFileStore, Map<String, BlockMetaInfo> blockMetaInfoMap,
    CarbonTable carbonTable, boolean addTableBlockToUnsafe, Configuration configuration)
    throws IOException, MemoryException {
  String uniqueTableSegmentIdentifier = identifier.getUniqueTableSegmentIdentifier();
  // lazily create the per-segment lock; addAndGetSegmentLock presumably resolves the
  // race when two threads miss at the same time — confirm its implementation
  Object lock = segmentLockMap.get(uniqueTableSegmentIdentifier);
  if (lock == null) {
    lock = addAndGetSegmentLock(uniqueTableSegmentIdentifier);
  }
  BlockDataMap dataMap;
  synchronized (lock) {
    dataMap = (BlockDataMap) BlockletDataMapFactory.createDataMap(carbonTable);
    // initialize from the index file's path and raw content plus the block meta info
    dataMap.init(new BlockletDataMapModel(carbonTable,
        identifier.getIndexFilePath() + CarbonCommonConstants.FILE_SEPARATOR + identifier
            .getIndexFileName(), indexFileStore.getFileData(identifier.getIndexFileName()),
        blockMetaInfoMap, identifier.getSegmentId(), addTableBlockToUnsafe, configuration));
  }
  return dataMap;
}