
How to use CacheProvider in org.apache.carbondata.core.cache

Best Java code snippets using org.apache.carbondata.core.cache.CacheProvider (Showing top 14 results out of 315)

origin: org.apache.carbondata/carbondata-core

public BlockletDataMapFactory(CarbonTable carbonTable, DataMapSchema dataMapSchema) {
 super(carbonTable, dataMapSchema);
 this.identifier = carbonTable.getAbsoluteTableIdentifier();
 cache = CacheProvider.getInstance()
   .createCache(CacheType.DRIVER_BLOCKLET_DATAMAP);
}
origin: org.apache.carbondata/carbondata-core

/**
 * This method will check if a cache already exists for the given cache type and create
 * one in case it is not present in the map.
 *
 * @param cacheType type of cache
 * @param <K>       cache key type
 * @param <V>       cache value type
 * @return the cache instance registered for the given cache type
 */
public <K, V> Cache<K, V> createCache(CacheType cacheType) {
 //check if lru cache is null, if null create one
 //check if cache is null for given cache type, if null create one
 if (!dictionaryCacheAlreadyExists(cacheType)) {
  synchronized (lock) {
   if (!dictionaryCacheAlreadyExists(cacheType)) {
    if (null == carbonLRUCache) {
     createLRULevelCacheInstance();
    }
    createDictionaryCacheForGivenType(cacheType);
   }
  }
 }
 return cacheTypeToCacheMap.get(cacheType);
}
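
A typical call site pairs getInstance() with createCache and then performs lookups on the returned cache. The following is a minimal sketch assembled from the snippets on this page; the identifier is assumed to be built as in the removeDictionaryColumnFromCache snippet further down, and Cache#get declares IOException, so the enclosing method must handle or declare it.

// Obtain the singleton provider, then create (or fetch) the reverse-dictionary cache.
// Repeated calls with the same CacheType return the same underlying cache instance
// thanks to the double-checked locking shown above.
Cache<DictionaryColumnUniqueIdentifier, Dictionary> dictCache =
  CacheProvider.getInstance().createCache(CacheType.REVERSE_DICTIONARY);
// Cache#get declares IOException, so the caller must handle or declare it.
Dictionary dictionary = dictCache.get(dictionaryColumnUniqueIdentifier);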
origin: org.apache.carbondata/carbondata-core

/**
 * This method will check if a cache already exists for the given cache type and store
 * it if it is not present in the map. The cache implementation is loaded reflectively
 * from the given class name, so its first public constructor must accept a CarbonLRUCache.
 *
 * @throws Exception if the cache class cannot be loaded or instantiated
 */
public <K, V> Cache<K, V> createCache(CacheType cacheType, String cacheClassName)
  throws Exception {
 //check if lru cache is null, if null create one
 //check if cache is null for given cache type, if null create one
 if (!dictionaryCacheAlreadyExists(cacheType)) {
  synchronized (lock) {
   if (!dictionaryCacheAlreadyExists(cacheType)) {
    if (null == carbonLRUCache) {
     createLRULevelCacheInstance();
    }
    Class<?> clazz = Class.forName(cacheClassName);
    Constructor<?> constructor = clazz.getConstructors()[0];
    constructor.setAccessible(true);
    Cache cacheObject = (Cache) constructor.newInstance(carbonLRUCache);
    cacheTypeToCacheMap.put(cacheType, cacheObject);
   }
  }
 }
 return cacheTypeToCacheMap.get(cacheType);
}
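
This class-name overload exists for cache implementations that carbondata-core does not know about; the BloomCoarseGrainDataMapFactory snippet further down uses it exactly this way. A hedged sketch of that call, mirroring the bloom snippet:

// Register (or fetch) a custom cache type backed by a reflectively loaded implementation.
// BloomDataMapCache is taken from the bloom snippet below; any class whose first public
// constructor accepts a CarbonLRUCache would be instantiated the same way.
Cache bloomCache = CacheProvider.getInstance()
  .createCache(new CacheType("bloom_cache"), BloomDataMapCache.class.getName());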
origin: org.apache.carbondata/carbondata-processing

public static Dictionary getDictionary(DictionaryColumnUniqueIdentifier columnIdentifier)
  throws IOException {
 Cache<DictionaryColumnUniqueIdentifier, Dictionary> dictCache =
   CacheProvider.getInstance().createCache(CacheType.REVERSE_DICTIONARY);
 return dictCache.get(columnIdentifier);
}
origin: org.apache.carbondata/carbondata-core

 /**
  * This method will remove dictionary cache from driver for both reverse and forward dictionary.
  *
  * @param carbonTableIdentifier absolute identifier of the table
  * @param columnId              id of the column whose dictionary entries are to be invalidated
  */
 public static void removeDictionaryColumnFromCache(AbsoluteTableIdentifier carbonTableIdentifier,
   String columnId) {
  Cache<DictionaryColumnUniqueIdentifier, Dictionary> dictCache =
    CacheProvider.getInstance().createCache(CacheType.REVERSE_DICTIONARY);
  DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
    new DictionaryColumnUniqueIdentifier(carbonTableIdentifier,
      new ColumnIdentifier(columnId, null, null));
  dictCache.invalidate(dictionaryColumnUniqueIdentifier);
  dictCache = CacheProvider.getInstance().createCache(CacheType.FORWARD_DICTIONARY);
  dictCache.invalidate(dictionaryColumnUniqueIdentifier);
 }
}
origin: org.apache.carbondata/carbondata-core

CacheProvider cacheProvider = CacheProvider.getInstance();
Cache<DictionaryColumnUniqueIdentifier, Dictionary> forwardDictionaryCache = cacheProvider
  .createCache(CacheType.FORWARD_DICTIONARY);
List<Dictionary> columnDictionaryList =
  forwardDictionaryCache.getAll(dictionaryColumnUniqueIdentifiers);
origin: org.apache.carbondata/carbondata-bloom

public BloomCoarseGrainDataMapFactory(CarbonTable carbonTable, DataMapSchema dataMapSchema)
  throws MalformedDataMapCommandException {
 super(carbonTable, dataMapSchema);
 Objects.requireNonNull(carbonTable);
 Objects.requireNonNull(dataMapSchema);
 this.dataMapName = dataMapSchema.getDataMapName();
 List<CarbonColumn> indexedColumns = carbonTable.getIndexedColumns(dataMapSchema);
 this.bloomFilterSize = validateAndGetBloomFilterSize(dataMapSchema);
 this.bloomFilterFpp = validateAndGetBloomFilterFpp(dataMapSchema);
 this.bloomCompress = validateAndGetBloomCompress(dataMapSchema);
 List<ExpressionType> optimizedOperations = new ArrayList<ExpressionType>();
 // todo: support more optimize operations
 optimizedOperations.add(ExpressionType.EQUALS);
 optimizedOperations.add(ExpressionType.IN);
 this.dataMapMeta = new DataMapMeta(this.dataMapName, indexedColumns, optimizedOperations);
 LOGGER.info(String.format("DataMap %s works for %s with bloom size %d",
   this.dataMapName, this.dataMapMeta, this.bloomFilterSize));
 try {
  this.cache = CacheProvider.getInstance()
    .createCache(new CacheType("bloom_cache"), BloomDataMapCache.class.getName());
 } catch (Exception e) {
  LOGGER.error(e);
  throw new MalformedDataMapCommandException(e.getMessage());
 }
}
origin: org.apache.carbondata/carbondata-spark2

private void initializeAtFirstRow() throws IOException {
 filterValues = new Object[carbonTable.getDimensionOrdinalMax() + measureCount];
 filterRow = new RowImpl();
 filterRow.setValues(filterValues);
 outputValues = new Object[projection.length];
 outputRow = new GenericInternalRow(outputValues);
 Path file = fileSplit.getPath();
 byte[] syncMarker = getSyncMarker(file.toString());
 FileSystem fs = file.getFileSystem(hadoopConf);
 int bufferSize = Integer.parseInt(hadoopConf.get(CarbonStreamInputFormat.READ_BUFFER_SIZE,
   CarbonStreamInputFormat.READ_BUFFER_SIZE_DEFAULT));
 FSDataInputStream fileIn = fs.open(file, bufferSize);
 fileIn.seek(fileSplit.getStart());
 input = new StreamBlockletReader(syncMarker, fileIn, fileSplit.getLength(),
   fileSplit.getStart() == 0, compressorName);
 cacheProvider = CacheProvider.getInstance();
 cache = cacheProvider.createCache(CacheType.FORWARD_DICTIONARY);
 queryTypes = CarbonStreamInputFormat.getComplexDimensions(carbonTable, storageColumns, cache);
 outputSchema = new StructType((StructField[])
   DataTypeUtil.getDataTypeConverter().convertCarbonSchemaToSparkSchema(projection));
}
origin: org.apache.carbondata/carbondata-core

long t1 = System.currentTimeMillis();
if (isDictExists) {
 Cache<DictionaryColumnUniqueIdentifier, Dictionary> dictCache = CacheProvider.getInstance()
     .createCache(CacheType.REVERSE_DICTIONARY);
 dictionary = dictCache.get(identifier);
origin: org.apache.carbondata/carbondata-hadoop

Cache dictCache = CacheProvider.getInstance()
  .createCache(CacheType.REVERSE_DICTIONARY);
for (int i = 0; i < set.length; i++) {
 ColumnIdentifier columnIdentifier =
   // ... (snippet truncated by the source page)
origin: org.apache.carbondata/carbondata-core

  new DictionaryColumnUniqueIdentifier(dictionarySourceAbsoluteTableIdentifier,
    columnIdentifier, carbonDimension.getDataType(), dictionaryPath);
CacheProvider cacheProvider = CacheProvider.getInstance();
Cache<DictionaryColumnUniqueIdentifier, Dictionary> forwardDictionaryCache =
  cacheProvider.createCache(CacheType.FORWARD_DICTIONARY);
origin: org.apache.carbondata/carbondata-hadoop

/**
 * This initialization is done inside executor task
 * for column dictionary involved in decoding.
 *
 * @param carbonColumns column list
 * @param carbonTable table identifier
 */
@Override public void initialize(CarbonColumn[] carbonColumns,
  CarbonTable carbonTable) throws IOException {
 this.carbonColumns = carbonColumns;
 dictionaries = new Dictionary[carbonColumns.length];
 dataTypes = new DataType[carbonColumns.length];
 for (int i = 0; i < carbonColumns.length; i++) {
  if (carbonColumns[i].hasEncoding(Encoding.DICTIONARY) && !carbonColumns[i]
    .hasEncoding(Encoding.DIRECT_DICTIONARY) && !carbonColumns[i].isComplex()) {
   CacheProvider cacheProvider = CacheProvider.getInstance();
   Cache<DictionaryColumnUniqueIdentifier, Dictionary> forwardDictionaryCache = cacheProvider
     .createCache(CacheType.FORWARD_DICTIONARY);
   dataTypes[i] = carbonColumns[i].getDataType();
   String dictionaryPath = carbonTable.getTableInfo().getFactTable().getTableProperties()
     .get(CarbonCommonConstants.DICTIONARY_PATH);
   dictionaries[i] = forwardDictionaryCache.get(new DictionaryColumnUniqueIdentifier(
     carbonTable.getAbsoluteTableIdentifier(),
     carbonColumns[i].getColumnIdentifier(), dataTypes[i], dictionaryPath));
  } else {
   dataTypes[i] = carbonColumns[i].getDataType();
  }
 }
}
origin: org.apache.carbondata/carbondata-processing

this.isEmptyBadRecord = isEmptyBadRecord;
CacheProvider cacheProvider = CacheProvider.getInstance();
Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
  cacheProvider.createCache(CacheType.REVERSE_DICTIONARY);
origin: org.apache.carbondata/carbondata-processing

 isDirectDictionary = true;
} else if (carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
 CacheProvider cacheProvider = CacheProvider.getInstance();
 Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
   cacheProvider.createCache(CacheType.REVERSE_DICTIONARY);
 Dictionary dictionary = null;
 if (useOnePass) {
org.apache.carbondata.core.cache.CacheProvider

Javadoc

Cache provider class which will create a cache based on the given type.

Most used methods

  • createCache
    This method will check if a cache already exists for the given cache type and store it if it is not present in the map
  • getInstance
    Returns the singleton CacheProvider instance
  • createDictionaryCacheForGivenType
    This method will create the cache for the given cache type
  • createLRULevelCacheInstance
    This method will create the LRU cache instance based on the given type
  • dictionaryCacheAlreadyExists
    This method will check whether the map already has an entry for the given cache type
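
Pulling the snippets above together, the common lifecycle looks like this (a minimal sketch; every call appears in the snippets on this page, and the identifier construction follows the removeDictionaryColumnFromCache snippet):

CacheProvider cacheProvider = CacheProvider.getInstance();   // singleton access
Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
  cacheProvider.createCache(CacheType.FORWARD_DICTIONARY);   // created on first use, reused afterwards
Dictionary dictionary = cache.get(identifier);               // single lookup; declares IOException
List<Dictionary> dictionaries = cache.getAll(identifiers);   // batch lookup
cache.invalidate(identifier);                                // evict one entry from the cache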
