private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) { BucketRegion br = region.getDataStore().getLocalBucketById(bucketId); if (br == null) { // got rebalanced or something throw new PrimaryBucketException("Bucket region is no longer available " + bucketId + region); } return br.getHoplogOrganizer(); } }
protected void checkForSafeError(Exception e) { boolean safeError = ShutdownHookManager.get().isShutdownInProgress(); if (safeError) { // IOException because of closed file system. This happens when member is // shutting down if (logger.isDebugEnabled()) logger.debug("IO error caused by filesystem shutdown", e); throw new CacheClosedException("IO error caused by filesystem shutdown", e); } if(isClosed()) { //If the hoplog organizer is closed, throw an exception to indicate the //caller should retry on the new primary. throw new PrimaryBucketException(e); } }
private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) { BucketRegion br = region.getDataStore().getLocalBucketById(bucketId); if (br == null) { // got rebalanced or something throw new PrimaryBucketException("Bucket region is no longer available " + bucketId + region); } return br.getHoplogOrganizer(); }
/**
 * Returns the number of entries, including HDFS-resident ones when the
 * region is configured to surface HDFS results; otherwise only the
 * in-memory count.
 */
public int size() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.KEYS).size();
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: report only what lives in the VM
  if (DEBUG || logger.isDebugEnabled()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
        "Ignoring HDFS results for #size"));
  }
  return backingRM.sizeInVM();
}
/**
 * Reports emptiness, consulting HDFS-resident entries only when the region
 * is configured to include HDFS results.
 */
public boolean isEmpty() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.KEYS).isEmpty();
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: answer from the in-VM count alone
  if (DEBUG || logger.isDebugEnabled()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
        "Ignoring HDFS results for #isEmpty"));
  }
  return backingRM.sizeInVM() == 0;
}
/**
 * Returns the region entries, spanning HDFS when the region includes HDFS
 * results, or only the in-VM entries otherwise.
 */
@SuppressWarnings("unchecked")
public Collection<RegionEntry> regionEntries() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.ENTRIES);
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: hand back only the in-VM entries
  if (DEBUG || logger.isDebugEnabled()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
        "Ignoring HDFS results for #regionEntries"));
  }
  return backingRM.regionEntriesInVM();
}
protected void checkForSafeError(Exception e) { boolean safeError = ShutdownHookManager.get().isShutdownInProgress(); if (safeError) { // IOException because of closed file system. This happens when member is // shutting down logger.fine("IO error caused by filesystem shutdown", e); throw new CacheClosedException("IO error caused by filesystem shutdown", e); } if(isClosed()) { //If the hoplog organizer is closed, throw an exception to indicate the //caller should retry on the new primary. throw new PrimaryBucketException(e); } }
/**
 * Reports emptiness; only consults HDFS when the region is configured to
 * include HDFS results.
 */
public boolean isEmpty() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.KEYS).isEmpty();
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: answer from the in-VM count alone
  if (DEBUG || logger.fineEnabled()) {
    logger.info(LocalizedStrings.DEBUG, "Ignoring HDFS results for #isEmpty");
  }
  return backingRM.sizeInVM() == 0;
}
/**
 * Returns the entry count; only consults HDFS when the region is configured
 * to include HDFS results.
 */
public int size() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.KEYS).size();
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: report only what lives in the VM
  if (DEBUG || logger.fineEnabled()) {
    logger.info(LocalizedStrings.DEBUG, "Ignoring HDFS results for #size");
  }
  return backingRM.sizeInVM();
}
/**
 * Verifies this member is the primary for the bucket.
 *
 * @throws PrimaryBucketException naming the current primary holder when this
 *         member is not primary (after readiness checks have had a chance to
 *         raise a more specific failure first)
 */
public void checkForPrimary() {
  if (getBucketAdvisor().isPrimary()) {
    return;
  }
  // let region/cache shutdown surface before reporting a primary miss
  this.partitionedRegion.checkReadiness();
  checkReadiness();
  InternalDistributedMember primaryHolder = getBucketAdvisor().basicGetPrimaryMember();
  throw new PrimaryBucketException(
      "Bucket " + getName() + " is not primary. Current primary holder is " + primaryHolder);
}
/**
 * Ensures this member currently holds the primary role for the bucket,
 * otherwise throws {@link PrimaryBucketException} identifying the actual
 * primary holder. Readiness is checked first so shutdown errors win.
 */
public void checkForPrimary() {
  final boolean amPrimary = getBucketAdvisor().isPrimary();
  if (amPrimary) {
    return;
  }
  this.partitionedRegion.checkReadiness();
  checkReadiness();
  final InternalDistributedMember primaryHolder =
      getBucketAdvisor().basicGetPrimaryMember();
  throw new PrimaryBucketException(
      "Bucket " + getName() + " is not primary. Current primary holder is " + primaryHolder);
}
/**
 * Estimates the bucket size. For an HDFS read/write region the estimate is
 * the sum of the pending HDFS queue size and the hoplog organizer's
 * estimate; otherwise it falls back to the exact {@link #size()}.
 *
 * @return the estimated number of entries in this bucket
 * @throws PrimaryBucketException if the size could not be computed here and
 *         the caller should retry on the primary
 */
@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) {
        return 0; // queue not initialized yet: nothing queued for HDFS
      }
      int hdfsBucketRegionSize = q.getBucketRegionQueue(
          partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (logger.isDebugEnabled()) {
        // parameterized form instead of string concatenation (log4j2 idiom)
        logger.debug("for bucket {} estimateSize returning {}", getName(),
            hdfsBucketRegionSize + hoplogEstimate);
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
/**
 * Returns the region entries; spans HDFS only when the region is configured
 * to include HDFS results.
 */
@SuppressWarnings("unchecked")
public Collection<RegionEntry> regionEntries() {
  closeDeadIterators();
  if (owner.getPartitionedRegion().includeHDFSResults()) {
    try {
      return createEntriesSet(IteratorType.ENTRIES);
    } catch (ForceReattemptException fre) {
      // surface as a retriable primary-bucket failure
      throw new PrimaryBucketException(fre.getLocalizedMessage(), fre);
    }
  }
  // HDFS results excluded: hand back only the in-VM entries
  if (DEBUG || logger.fineEnabled()) {
    logger.info(LocalizedStrings.DEBUG, "Ignoring HDFS results for #regionEntries");
  }
  return backingRM.regionEntriesInVM();
}
/**
 * Fetches the live region entry for a transactional read, resolving the
 * bucket id if unknown and optionally verifying this member is primary.
 *
 * @param keyInfo key plus routing information; its bucket id is filled in
 *        when it arrives as {@code UNKNOWN_BUCKET}
 * @return the entry, or {@code null} when absent, tombstoned, or the bucket
 *         is not hosted locally
 * @throws PrimaryBucketException when a primary check is requested and some
 *         other member holds the primary
 */
@Override
protected RegionEntry basicGetTXEntry(KeyInfo keyInfo) {
  int bucketId = keyInfo.getBucketId();
  if (bucketId == KeyInfo.UNKNOWN_BUCKET) {
    bucketId = PartitionedRegionHelper.getHashKey(this, null, keyInfo.getKey(),
        keyInfo.getValue(), keyInfo.getCallbackArg());
    keyInfo.setBucketId(bucketId);
  }
  if (keyInfo.isCheckPrimary()) {
    DistributedMember primary = getRegionAdvisor().getPrimaryMemberForBucket(
        bucketId);
    if (!primary.equals(getMyId())) {
      throw new PrimaryBucketException("Bucket " + bucketId
          + " is not primary. Current primary holder is " + primary);
    }
  }
  BucketRegion br = this.dataStore.getLocalBucketById(bucketId);
  if (br == null) {
    // Bucket is not hosted here (e.g. rebalanced away between the primary
    // check and the lookup): report "no entry" instead of NPE-ing below.
    return null;
  }
  RegionEntry re = br.basicGetEntry(keyInfo.getKey());
  if (re != null && re.isRemoved()) {
    // removed tokens (tombstones) must not be visible to the transaction
    re = null;
  }
  return re;
}
@Override public final void checkAllBucketsHosted() throws BucketMovedException { // check if bucket has moved if (this.pr != null) { final BucketAdvisor bucAdvisor = this.pr.getRegionAdvisor() .getBucketAdvisor(this.bucketId); if (optimizeForWrite()) { if (!bucAdvisor.isPrimary()) { this.pr.checkReadiness(); InternalDistributedMember primaryHolder = bucAdvisor .basicGetPrimaryMember(); throw new PrimaryBucketException("Bucket " + this.pr.getBucketName(this.bucketId) + " is not primary. Current primary holder is " + primaryHolder); } } else if (!bucAdvisor.isHosting()) { throw new BucketMovedException( LocalizedStrings.FunctionService_BUCKET_MIGRATED_TO_ANOTHER_NODE .toLocalizedString(), this.bucketId, this.pr.getFullPath()); } } }
@Override public final void checkAllBucketsHosted() throws BucketMovedException { // check if bucket has moved if (this.pr != null) { final BucketAdvisor bucAdvisor = this.pr.getRegionAdvisor() .getBucketAdvisor(this.bucketId); if (optimizeForWrite()) { if (!bucAdvisor.isPrimary()) { this.pr.checkReadiness(); InternalDistributedMember primaryHolder = bucAdvisor .basicGetPrimaryMember(); throw new PrimaryBucketException("Bucket " + this.pr.getBucketName(this.bucketId) + " is not primary. Current primary holder is " + primaryHolder); } } else if (!bucAdvisor.isHosting()) { throw new BucketMovedException( LocalizedStrings.FunctionService_BUCKET_MIGRATED_TO_ANOTHER_NODE .toLocalizedString(), this.bucketId, this.pr.getFullPath()); } } }
@Override public final void checkAllBucketsHosted() throws BucketMovedException { // check if bucket has moved if (this.pr != null) { final BucketAdvisor bucAdvisor = this.pr.getRegionAdvisor() .getBucketAdvisor(this.bucketId); if (optimizeForWrite()) { if (!bucAdvisor.isPrimary()) { this.pr.checkReadiness(); InternalDistributedMember primaryHolder = bucAdvisor .basicGetPrimaryMember(); throw new PrimaryBucketException("Bucket " + this.pr.getBucketName(this.bucketId) + " is not primary. Current primary holder is " + primaryHolder); } } else if (!bucAdvisor.isHosting()) { throw new BucketMovedException( LocalizedStrings.FunctionService_BUCKET_MIGRATED_TO_ANOTHER_NODE .toLocalizedString(), this.bucketId, this.pr.getFullPath()); } } }
/**
 * Estimates the bucket size: for an HDFS read/write region, the pending
 * HDFS queue size plus the hoplog organizer's estimate; otherwise the exact
 * {@link #size()}.
 */
@Override
public int sizeEstimate() {
  if (!isHDFSReadWriteRegion()) {
    return size();
  }
  try {
    checkForPrimary();
    final ConcurrentParallelGatewaySenderQueue queue = getHDFSQueue();
    if (queue == null) {
      return 0; // queue not initialized: nothing pending for HDFS
    }
    final int queuedCount =
        queue.getBucketRegionQueue(partitionedRegion, getId()).size();
    final int hoplogCount = (int) getHoplogOrganizer().sizeEstimate();
    final int estimate = queuedCount + hoplogCount;
    if (getLogWriterI18n().fineEnabled()) {
      getLogWriterI18n().fine(
          "for bucket " + getName() + " estimateSize returning " + estimate);
    }
    return estimate;
  } catch (ForceReattemptException e) {
    throw new PrimaryBucketException(e.getLocalizedMessage(), e);
  }
}
/**
 * Lazily resolves the concurrent parallel gateway-sender queue that feeds
 * HDFS for this bucket's region, then verifies the queue's own bucket is
 * primary on this member when the data bucket is.
 *
 * @return the cached queue, or {@code null} if the sender's event processor
 *         is not available yet
 * @throws PrimaryBucketException when this member holds the data bucket's
 *         primary but the queue bucket has not become primary here even
 *         after waiting
 * @throws ForceReattemptException declared for callers; presumably
 *         propagated from {@code getBucketRegionQueue} — TODO confirm
 */
private ConcurrentParallelGatewaySenderQueue getHDFSQueue() throws ForceReattemptException {
  if (this.hdfsQueue == null) {
    // NOTE(review): unsynchronized lazy init — looks like a benign race
    // (worst case the field is assigned twice with equivalent values);
    // confirm callers cannot observe a partially published queue.
    String asyncQId = this.owner.getPartitionedRegion().getHDFSEventQueueName();
    final AsyncEventQueueImpl asyncQ =
        (AsyncEventQueueImpl)this.owner.getCache().getAsyncEventQueue(asyncQId);
    final ParallelGatewaySenderImpl gatewaySender =
        (ParallelGatewaySenderImpl)asyncQ.getSender();
    AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
    // processor not started yet: caller must treat the queue as unavailable
    if (ep == null) return null;
    hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
  }

  // Check whether the queue has become primary here.
  // There could be some time between bucket becoming a primary
  // and underlying queue becoming a primary, so isPrimaryWithWait()
  // waits for some time for the queue to become a primary on this member
  final HDFSBucketRegionQueue brq = hdfsQueue.getBucketRegionQueue(
      this.owner.getPartitionedRegion(), this.owner.getId());
  if (brq != null) {
    if (owner.getBucketAdvisor().isPrimary()
        && !brq.getBucketAdvisor().isPrimaryWithWait()) {
      // data bucket is primary here but the queue bucket never became
      // primary: tell the caller to retry on the actual primary holder
      InternalDistributedMember primaryHolder = brq.getBucketAdvisor()
          .basicGetPrimaryMember();
      throw new PrimaryBucketException("Bucket " + brq.getName()
          + " is not primary. Current primary holder is " + primaryHolder);
    }
  }
  return hdfsQueue;
}
private ConcurrentParallelGatewaySenderQueue getHDFSQueue() throws ForceReattemptException { if (this.hdfsQueue == null) { String asyncQId = this.owner.getPartitionedRegion().getHDFSEventQueueName(); final AsyncEventQueueImpl asyncQ = (AsyncEventQueueImpl)this.owner.getCache().getAsyncEventQueue(asyncQId); final AbstractGatewaySender gatewaySender = (AbstractGatewaySender)asyncQ.getSender(); AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor(); if (ep == null) return null; hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue(); } // Check whether the queue has become primary here. // There could be some time between bucket becoming a primary // and underlying queue becoming a primary, so isPrimaryWithWait() // waits for some time for the queue to become a primary on this member final HDFSBucketRegionQueue brq = hdfsQueue.getBucketRegionQueue( this.owner.getPartitionedRegion(), this.owner.getId()); if (brq != null) { if (owner.getBucketAdvisor().isPrimary() && !brq.getBucketAdvisor().isPrimaryWithWait()) { InternalDistributedMember primaryHolder = brq.getBucketAdvisor() .basicGetPrimaryMember(); throw new PrimaryBucketException("Bucket " + brq.getName() + " is not primary. Current primary holder is " + primaryHolder); } } return hdfsQueue; }