// Dump HDFS-related region attributes into the shared buffer, one "\n key: value" entry each.
// IDIOM FIX: chain append() calls instead of concatenating inside append(), which built a
// throwaway String per line. Output is byte-identical (append(Object/boolean) uses the same
// String conversion as concatenation).
// Gateway sender ids are copied into a TreeSet so the printed order is deterministic.
buf.append("\n gatewaySenders: ").append(new TreeSet(r.getGatewaySenderIds()));
buf.append("\n hdfsStoreName: ").append(r.getHDFSStoreName());
buf.append("\n hdfsWriteOnly: ").append(r.getHDFSWriteOnly());
buf.append("\n ignoreJTA: ").append(r.getIgnoreJTA());
buf.append("\n indexMaintenanceSynchronous: ").append(r.getIndexMaintenanceSynchronous());
// Dump HDFS write-only, JTA, and index-maintenance flags into the shared buffer.
// IDIOM FIX: chain append() calls instead of concatenating inside append() — same output,
// no intermediate String allocations.
buf.append("\n hdfsWriteOnly: ").append(r.getHDFSWriteOnly());
buf.append("\n ignoreJTA: ").append(r.getIgnoreJTA());
buf.append("\n indexMaintenanceSynchronous: ").append(r.getIndexMaintenanceSynchronous());
public static void createAndAddAsyncQueue(String regionPath, RegionAttributes regionAttributes, Cache cache) { if(!regionAttributes.getDataPolicy().withHDFS()) { return; } String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache); String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath); if (cache.getAsyncEventQueue(defaultAsyncQueueName) == null) { if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null && !(regionAttributes.getPartitionAttributes().getLocalMaxMemory() == 0)) { HDFSStore store = cache.findHDFSStore(regionAttributes.getHDFSStoreName()); if (store == null) { throw new IllegalStateException( LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND .toLocalizedString(regionAttributes.getHDFSStoreName())); } HDFSEventQueueAttributes queueAttrs = store.getHDFSEventQueueAttributes(); if(queueAttrs == null) { // no async queue is specified for region with a HDFS store. Create a async queue with default // properties and set the bucketsorted=true. HDFSIntegrationUtil.createDefaultAsyncQueueForHDFS(cache, regionAttributes.getHDFSWriteOnly(), leaderRegionPath); } else { HDFSIntegrationUtil.createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), queueAttrs); } } } }
// Verify the HDFS event-queue / store attributes against their expected configured values.
// IDIOM FIX: "expr == false" / "expr == true" comparisons replaced with direct boolean
// expressions; pass/fail behavior and all messages are unchanged.
assertTrue("Mismatch in attributes, actual.batchsize: " + store.getHDFSEventQueueAttributes().getBatchSizeMB() + " and expected batchsize: 32",
    store.getHDFSEventQueueAttributes().getBatchSizeMB() == 32);
assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getHDFSEventQueueAttributes().isPersistent() + " and expected isPersistent: false",
    !store.getHDFSEventQueueAttributes().isPersistent());
assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: null",
    store.getHDFSEventQueueAttributes().getDiskStoreName() == null);
assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getFileRolloverInterval() + " and expected getFileRolloverInterval: 3600",
    store.getFileRolloverInterval() == 3600);
assertTrue("Mismatch in attributes, actual.batchsize: " + store.getHDFSEventQueueAttributes().getBatchSizeMB() + " and expected batchsize: 32",
    store.getHDFSEventQueueAttributes().getBatchSizeMB() == 32);
assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getHDFSEventQueueAttributes().isPersistent() + " and expected isPersistent: false",
    !store.getHDFSEventQueueAttributes().isPersistent());
// NOTE(review): these "isRandomAccessAllowed" messages print getHDFSWriteOnly(); presumably
// randomAccessAllowed == !writeOnly, which would make some expected-value labels below
// inconsistent with the asserted condition — confirm intent before changing either side.
assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: false",
    !r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: null",
    store.getHDFSEventQueueAttributes().getDiskStoreName() == null);
assertTrue("Mismatch in attributes, actual.batchsize: " + store.getHDFSEventQueueAttributes().getBatchSizeMB() + " and expected batchsize: 151",
    store.getHDFSEventQueueAttributes().getBatchSizeMB() == 151);
assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getHDFSEventQueueAttributes().isPersistent() + " and expected isPersistent: true",
    store.getHDFSEventQueueAttributes().isPersistent());
// NOTE(review): message claims "expected isRandomAccessAllowed: true" but the assertion
// checks getHDFSWriteOnly() == false; consistent only if randomAccessAllowed == !writeOnly.
assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true",
    !r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: mydiskstore",
    store.getHDFSEventQueueAttributes().getDiskStoreName().equals("mydiskstore"));
// NOTE(review): message label says "expected getDiskStoreName" but the value under test
// is the HDFS store name — likely a copy-paste slip in the message.
assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore",
    r1.getAttributes().getHDFSStoreName().equals("myHDFSStore"));
// Creates and registers (if absent) the async event queue that feeds HDFS for the given
// region. No-op for non-HDFS data policies; only data-hosting partitioned regions with a
// configured HDFS store get a queue. This variant resolves the store via GemFireCacheImpl
// and always delegates to createAsyncQueueForHDFS with the store itself.
// Throws IllegalStateException if the named HDFS store is not defined in the cache.
public static void createAndAddAsyncQueue(String regionPath, RegionAttributes regionAttributes, Cache cache) {
  // Only HDFS-backed data policies need an event queue.
  if (!regionAttributes.getDataPolicy().withHDFS()) {
    return;
  }
  // The queue is shared per leader (colocation root) region — name it from that path.
  String leaderRegionPath = getLeaderRegionPath(regionPath, regionAttributes, cache);
  String defaultAsyncQueueName = HDFSStoreFactoryImpl.getEventQueueName(leaderRegionPath);
  if (cache.getAsyncEventQueue(defaultAsyncQueueName) == null) {
    // Skip accessors (local max memory 0) and regions without an HDFS store name.
    if (regionAttributes.getHDFSStoreName() != null && regionAttributes.getPartitionAttributes() != null
        && !(regionAttributes.getPartitionAttributes().getLocalMaxMemory() == 0)) {
      HDFSStore store = ((GemFireCacheImpl) cache).findHDFSStore(regionAttributes.getHDFSStoreName());
      if (store == null) {
        throw new IllegalStateException(
            LocalizedStrings.HOPLOG_HDFS_STORE_NOT_FOUND.toLocalizedString(regionAttributes.getHDFSStoreName()));
      }
      HDFSIntegrationUtil
          .createAsyncQueueForHDFS(cache, leaderRegionPath, regionAttributes.getHDFSWriteOnly(), store);
    }
  }
}
// Copy HDFS write-only, JTA, and index-maintenance flags from the runtime attributes (ra)
// onto the region description (rd). Boolean.valueOf reuses the cached Boolean instances
// rather than allocating new wrappers.
rd.setHDFSWriteOnly(Boolean.valueOf(ra.getHDFSWriteOnly()));
rd.setIgnoreJTA(Boolean.valueOf(ra.getIgnoreJTA()));
rd.setIndexMaintenanceSynchronous(Boolean.valueOf(ra.getIndexMaintenanceSynchronous()));
// Snapshot the region's HDFS write-only flag from the supplied attributes.
this.hdfsWriteOnly = ra.getHDFSWriteOnly();
// Capture the region's HDFS configuration into locals — presumably consumed by
// validation/creation logic further down (not visible in this chunk).
boolean hdfsWriteOnly = regAttrs.getHDFSWriteOnly();
String hdfsStoreName = regAttrs.getHDFSStoreName();
// Copy subscription-conflation and HDFS settings into the info snapshot, then collect the
// region's subregions (recursive=false — TODO confirm this means immediate children only).
info.subscriptionConflationEnabled = attr.getEnableSubscriptionConflation();
info.hdfsStoreName = attr.getHDFSStoreName();
info.hdfsWriteOnly = attr.getHDFSWriteOnly();
// NOTE(review): raw HashSet — consider parameterizing if the field's declared type allows.
info.directSubregions = new HashSet();
Set<Region> aSet = aRegion.subregions(false);
// Snapshot async-event-queue ids, HDFS settings, compressor description, and the
// off-heap flag from the region attributes into this descriptor.
this.asyncEventQueueDescs = getDescs(attr.getAsyncEventQueueIds().toArray());
this.hdfsStoreName = attr.getHDFSStoreName();
this.hdfsWriteOnly = attr.getHDFSWriteOnly();
this.compressorDesc = getDesc(attr.getCompressor());
this.offHeap = attr.getOffHeap();
// Verify HDFS event-queue / store attributes against their expected configured values.
// BUG FIX at the end of this run: the disk-store name was compared with == (reference
// identity), which is unreliable for Strings produced at runtime (e.g. parsed from XML);
// replaced with a null-safe equals(). Boolean "== true/false" comparisons rewritten as
// direct boolean expressions — pass/fail behavior and all messages unchanged.
assertEquals(false, r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: null",
    store.getHDFSEventQueueAttributes().getDiskStoreName() == null);
assertTrue("Mismatch in attributes, actual.getFileRolloverInterval: " + store.getFileRolloverInterval() + " and expected getFileRolloverInterval: 3600",
    store.getFileRolloverInterval() == 3600);
assertTrue("Mismatch in attributes, actual.batchsize: " + store.getHDFSEventQueueAttributes().getBatchSizeMB() + " and expected batchsize: 32",
    store.getHDFSEventQueueAttributes().getBatchSizeMB() == 32);
assertTrue("Mismatch in attributes, actual.isPersistent: " + store.getHDFSEventQueueAttributes().isPersistent() + " and expected isPersistent: false",
    !store.getHDFSEventQueueAttributes().isPersistent());
assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true",
    r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: null",
    store.getHDFSEventQueueAttributes().getDiskStoreName() == null);
// NOTE(review): message label says "expected batchsize" but the value checked is the
// batch time interval — likely a copy-paste slip in the message.
assertTrue("Mismatch in attributes, actual.batchInterval: " + store.getHDFSEventQueueAttributes().getBatchTimeInterval() + " and expected batchsize: 60000",
    store.getHDFSEventQueueAttributes().getBatchTimeInterval() == 60000);
assertTrue("Mismatch in attributes, actual.isRandomAccessAllowed: " + r1.getAttributes().getHDFSWriteOnly() + " and expected isRandomAccessAllowed: true",
    r1.getAttributes().getHDFSWriteOnly());
assertTrue("Mismatch in attributes, actual.getDiskStoreName: " + store.getHDFSEventQueueAttributes().getDiskStoreName() + " and expected getDiskStoreName: mydisk",
    "mydisk".equals(store.getHDFSEventQueueAttributes().getDiskStoreName()));
// BUG FIX: the HDFS store name was compared with == (String reference identity), which is
// unreliable for names produced at runtime; replaced with a null-safe equals().
// NOTE(review): the message label says "expected getDiskStoreName" although the value under
// test is the HDFS store name — likely a copy-paste slip in the message.
assertTrue("Mismatch in attributes, actual.HDFSStoreName: " + r1.getAttributes().getHDFSStoreName() + " and expected getDiskStoreName: myHDFSStore",
    "myHDFSStore".equals(r1.getAttributes().getHDFSStoreName()));
// Copy the HDFS write-only flag onto the factory's attribute holder; when the source
// attributes are user-specified, also propagate the "was explicitly set" marker so
// default-vs-explicit inheritance is preserved.
// (Fragment: the enclosing if-block closes outside this chunk.)
this.regionAttributes.hdfsWriteOnly = regionAttributes.getHDFSWriteOnly();
if (regionAttributes instanceof UserSpecifiedRegionAttributes) {
  this.regionAttributes.setHasHDFSWriteOnly(((UserSpecifiedRegionAttributes<K,V>) regionAttributes).hasHDFSWriteOnly());
// Snapshot the HDFS write-only flag, compressor description, and off-heap-memory setting.
this.hdfsWriteOnly = attr.getHDFSWriteOnly();
this.compressorDesc = getDesc(attr.getCompressor());
this.enableOffHeapMemory = attr.getEnableOffHeapMemory();
// Propagate data policy and HDFS settings from the source attributes (ra) onto the
// attributes factory. (Three identical occurrences — these fragments appear to come
// from separate call sites collapsed into this chunk.)
afact.setDataPolicy(ra.getDataPolicy()); afact.setHDFSStoreName(ra.getHDFSStoreName()); afact.setHDFSWriteOnly(ra.getHDFSWriteOnly());
afact.setDataPolicy(ra.getDataPolicy()); afact.setHDFSStoreName(ra.getHDFSStoreName()); afact.setHDFSWriteOnly(ra.getHDFSWriteOnly());
afact.setDataPolicy(ra.getDataPolicy()); afact.setHDFSStoreName(ra.getHDFSStoreName()); afact.setHDFSWriteOnly(ra.getHDFSWriteOnly());
// Record the region's HDFS write-only flag from the supplied attributes.
// (Two identical occurrences — these appear to be fragments from separate contexts.)
this.hdfsWriteOnly = attrs.getHDFSWriteOnly();
this.hdfsWriteOnly = attrs.getHDFSWriteOnly();
// When the parent attributes were user-specified and the parent explicitly set the
// HDFS write-only flag, inherit the parent's value.
// NOTE(review): setHDFSWriteOnly is invoked twice with the same argument — the second
// call looks redundant; confirm (against the full file) and remove if accidental.
// (Fragments: the enclosing braces close outside this chunk; two occurrences shown.)
if (parentIsUserSpecified) {
  if (parentWithHas.hasHDFSWriteOnly()) {
    setHDFSWriteOnly(parent.getHDFSWriteOnly());
    setHDFSWriteOnly(parent.getHDFSWriteOnly());
if (parentIsUserSpecified) {
  if (parentWithHas.hasHDFSWriteOnly()) {
    setHDFSWriteOnly(parent.getHDFSWriteOnly());
    setHDFSWriteOnly(parent.getHDFSWriteOnly());