public OStorage createStorage(final String dbName, final Map<String, String> configuration, long maxWalSegSize) {
  try {
    return new OLocalPaginatedStorage(dbName, dbName, getMode(configuration), generateStorageId(), readCache, files,
        maxWalSegSize);
  } catch (Exception e) {
    final String message =
        "Error on opening database: " + dbName + ". Current location is: " + new java.io.File(".").getAbsolutePath();
    OLogManager.instance().error(this, message, e);
    throw OException.wrapException(new ODatabaseException(message), e);
  }
}
@Override
public byte[] toStream() throws OSerializationException {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  try {
    ObjectOutputStream writer = new ObjectOutputStream(output);
    writer.writeObject(serializable);
    writer.close();
  } catch (IOException e) {
    throw OException.wrapException(new ODatabaseException("Error on serialization of Serializable"), e);
  }
  return output.toByteArray();
}
OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, e); throw ODatabaseException.wrapException(new OStorageException("Error during creation of record"), e); } finally { atomicOperationsManager.endAtomicOperation(rollback); if (OLogManager.instance().isDebugEnabled()) { OLogManager.instance().debug(this, "Created record %s v.%s size=%d bytes", rid, recordVersion, content.length); throw OException.wrapException( new OStorageException("Error during record deletion in cluster " + (cluster != null ? cluster.getName() : "")), ioe);
OLogManager.instance()
    .warn(this, "You use deprecated record locking strategy: %s it may lead to deadlocks ", lockingStrategy);
record.lock(false);

OLogManager.instance()
    .warn(this, "You use deprecated record locking strategy: %s it may lead to deadlocks ", lockingStrategy);
record.lock(true);
} catch (Exception t) {
  if (rid.isTemporary())
    throw OException.wrapException(new ODatabaseException("Error on retrieving record using temporary RID: " + rid), t);
  else
    throw OException.wrapException(new ODatabaseException(
        "Error on retrieving record " + rid + " (cluster: " + getStorage().getPhysicalClusterNameById(rid.getClusterId())
            + ")"), t);
} finally {
final File dirFile = new File(directoryPath);
if (!dirFile.exists()) {
  OLogManager.instance().infoNoDb(this, "Directory " + dirFile + " does not exist, try to create it.");
  if (!dirFile.mkdirs()) {
    OLogManager.instance().errorNoDb(this, "Can not create directory " + dirFile, null);
  }
}

throw new ODatabaseException("Invalid configuration settings. Can not set maximum size of WAL segment");

OLogManager.instance().infoNoDb(this, "WAL maximum segment size is set to %,d MB", maxWALSegmentSize / 1024 / 1024);
} catch (IOException e) {
  throw OException.wrapException(new ODatabaseException("Cannot initialize OrientDB engine"), e);
OLogManager.instance().error(this, "There is no atomic operation active", null); throw new ODatabaseException("There is no atomic operation active");
@Override
public long getClusterRecordSizeById(final int clusterId) {
  checkIfActive();
  try {
    return getStorage().getClusterById(clusterId).getRecordsSize();
  } catch (Exception e) {
    throw OException
        .wrapException(new ODatabaseException("Error on reading records size for cluster with id '" + clusterId + "'"), e);
  }
}
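Most of the hits above share one error-handling convention: the low-level failure is wrapped in a domain-specific exception via OException.wrapException, which chains it as the cause. A minimal caller-side sketch; the call site and cluster id are illustrative, not taken from any hit:

try {
  long size = db.getClusterRecordSizeById(42); // hypothetical call site
} catch (ODatabaseException e) {
  // wrapException chained the storage-level failure as the cause,
  // so the original stack trace remains reachable here.
  Throwable lowLevel = e.getCause();
}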
/**
 * Gives a new connection to the database. If the current factory is configured to use a pool (see the
 * {@link #setupPool(int, int)} method), retrieves a connection from the pool. Otherwise creates a new connection
 * each time.
 *
 * @param iCreate if true, automatically creates the database if a database with the given URL does not exist
 * @param iOpen   if true, automatically opens the database
 *
 * @return database
 */
public ODatabaseDocumentTx getDatabase(final boolean iCreate, final boolean iOpen) {
  if (pool != null)
    return pool.acquire();

  final ODatabaseDocument db = new ODatabaseDocumentTx(url);
  if (properties != null) {
    properties.entrySet().forEach(e -> db.setProperty(e.getKey(), e.getValue()));
  }
  if (!db.getURL().startsWith("remote:") && !db.exists()) {
    if (iCreate)
      db.create();
    else if (iOpen)
      throw new ODatabaseException("Database '" + url + "' not found");
  } else if (iOpen)
    db.open(user, password);

  return (ODatabaseDocumentTx) db;
}
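A hedged usage sketch of this factory method; the factory variable, pool sizes, and the Person class are assumptions for illustration:

factory.setupPool(1, 10);                                  // optional: later calls acquire from the pool
ODatabaseDocumentTx db = factory.getDatabase(true, true);  // create if missing, otherwise open
try {
  db.save(new ODocument("Person").field("name", "Alice")); // regular work with the connection
} finally {
  db.close(); // a pooled connection is released back to the pool rather than destroyed
}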
String serializeName = getStorage().getConfiguration().getRecordSerializer();
if (serializeName == null)
  throw new ODatabaseException("Impossible to open database from version before 2.x, use export/import instead");

serializer = serializerFactory.getFormat(serializeName);
if (serializer == null)
  throw new ODatabaseException("RecordSerializer with name '" + serializeName + "' not found");

if (getStorage().getConfiguration().getRecordSerializerVersion() > serializer.getMinSupportedVersion())
  throw new ODatabaseException("Persistent record serializer version is not supported by the current implementation");
} catch (Exception e) {
  ODatabaseRecordThreadLocal.instance().remove();
  throw OException.wrapException(new ODatabaseException("Cannot open database url=" + getURL()), e);
protected void checkOpenness() {
  if (ownerPool == null)
    throw new ODatabaseException(
        "Database instance has been released to the pool. Get another database instance from the pool with the right username and password");

  // super.checkOpenness();
}
public ODatabaseDocumentEmbedded(final OStorage storage) {
  activateOnCurrentThread();

  try {
    status = STATUS.CLOSED;

    // OVERWRITE THE URL
    url = storage.getURL();
    this.storage = storage;
    this.componentsFactory = storage.getComponentsFactory();

    unmodifiableHooks = Collections.unmodifiableMap(hooks);
    localCache = new OLocalRecordCache();

    init();
    databaseOwner = this;
  } catch (Exception t) {
    ODatabaseRecordThreadLocal.instance().remove();
    throw OException.wrapException(new ODatabaseException("Error on opening database"), t);
  }
}
protected void truncateClusterInternal(final String clusterName, final ODatabaseDocumentInternal database) {
  final OCluster cluster = database.getStorage().getClusterByName(clusterName);
  if (cluster == null) {
    throw new ODatabaseException("Cluster with name " + clusterName + " does not exist");
  }

  try {
    database.checkForClusterPermissions(clusterName);
    cluster.truncate();
  } catch (IOException e) {
    throw OException.wrapException(new ODatabaseException("Error during truncate of cluster " + clusterName), e);
  }

  for (OIndex index : getIndexes()) {
    index.rebuild();
  }
}
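For context, a hedged sketch of reaching this truncate path through OrientDB's public SQL API; the db variable and the cluster name are assumptions:

// TRUNCATE CLUSTER is OrientDB SQL; it ends up in logic like truncateClusterInternal
// above, after which the indexes are rebuilt.
db.command(new OCommandSQL("TRUNCATE CLUSTER Person")).execute();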
protected void checkOpenness() {
  if (internal == null)
    throw new ODatabaseException("Database '" + getURL() + "' is closed");
}
@Override
public void reload() {
  checkIfActive();

  if (this.isClosed())
    throw new ODatabaseException("Cannot reload a closed db");

  metadata.reload();
  getStorage().reload();
}
if (rid.getClusterId() <= ORID.CLUSTER_ID_INVALID && getStorage().isAssigningClusterIds()) {
  if (record instanceof ODocument) {
    schemaClass = ODocumentInternal.getImmutableSchemaClass(this, ((ODocument) record));
    rid.setClusterId(schemaClass.getClusterForNewInstance((ODocument) record));
  } else
    throw new ODatabaseException("Cannot save (1) document " + record + ": no class or cluster defined");
} else {
  if (record instanceof ORecordBytes) {
    throw new ODatabaseException("Cannot save (3) document " + record + ": no class or cluster defined");
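A hedged sketch of the two save paths this fragment distinguishes; the db variable, class, and cluster name are assumptions:

// Path 1: the document has a schema class, so the target cluster is derived
// from it (getClusterForNewInstance above).
db.save(new ODocument("Person").field("name", "Alice"));

// Path 2: raw byte records carry no class, so a cluster must be named explicitly,
// otherwise the "Cannot save (3)" exception above is thrown.
db.save(new ORecordBytes(new byte[] { 1, 2, 3 }), "binarydata");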
throw new ODatabaseException(
    "Cannot hide record because it has no identity. Probably was created from scratch or contains projections of fields rather than a full record");

operationResult = getStorage().hideRecord(rid, iMode.ordinal(), null);
@Override
public void flushTillSegment(final long segmentId) {
  final Future<Void> future = commitExecutor.submit(new FlushTillSegmentTask(segmentId));
  try {
    future.get();
  } catch (final Exception e) {
    throw OException.wrapException(new OStorageException("Error during data flush"), e);
  }
}
@Override
public OSerializableStream fromStream(byte[] iStream) throws OSerializationException {
  ByteArrayInputStream stream = new ByteArrayInputStream(iStream);
  try {
    ObjectInputStream reader = new ObjectInputStream(stream);
    serializable = (Serializable) reader.readObject();
    reader.close();
  } catch (Exception e) {
    throw OException.wrapException(new ODatabaseException("Error on deserialization of Serializable"), e);
  }
  return this;
}
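Together with toStream above, this completes a plain Java serialization round trip. A standalone sketch of the same idea, with hypothetical class and helper names:

import java.io.*;

final class JavaSerializationSketch {
  // Mirrors toStream: object -> byte[] via ObjectOutputStream.
  static byte[] toBytes(Serializable value) throws IOException {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    try (ObjectOutputStream writer = new ObjectOutputStream(output)) {
      writer.writeObject(value);
    }
    return output.toByteArray();
  }

  // Mirrors fromStream: byte[] -> object via ObjectInputStream.
  static Serializable fromBytes(byte[] bytes) throws IOException, ClassNotFoundException {
    try (ObjectInputStream reader = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
      return (Serializable) reader.readObject();
    }
  }
}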
@Override
public long getClusterRecordSizeByName(final String clusterName) {
  checkIfActive();
  try {
    return getStorage().getClusterById(getClusterIdByName(clusterName)).getRecordsSize();
  } catch (Exception e) {
    throw OException.wrapException(new ODatabaseException("Error on reading records size for cluster '" + clusterName + "'"), e);
  }
}