@Override
public void close() throws IOException {
  updater.close(false);
  updater = null;
}
@Override
public void closeBatch() throws StreamingIOFailure {
  boolean haveError = false;
  for (RecordUpdater updater : updaters) {
    if (updater != null) {
      try {
        // try not to leave any files open
        updater.close(false);
      } catch (Exception ex) {
        haveError = true;
        LOG.error("Unable to close " + updater + " due to: " + ex.getMessage(), ex);
      }
    }
  }
  updaters.clear();
  if (haveError) {
    throw new StreamingIOFailure("Encountered errors while closing (see logs) " + getWatermark());
  }
}
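// closeBatch() above uses a close-all-then-throw-once pattern: every updater
// gets a chance to close even if an earlier one fails, and a single exception
// is raised at the end. A minimal standalone sketch of the same idea follows;
// Closer is a hypothetical helper, not part of the Hive API.
import java.io.Closeable;
import java.io.IOException;
import java.util.List;

public final class Closer {
  /** Closes every resource; remembers failures so the rest still close, then throws once. */
  public static void closeAll(List<? extends Closeable> resources) throws IOException {
    boolean haveError = false;
    for (Closeable c : resources) {
      if (c == null) {
        continue;
      }
      try {
        c.close(); // try not to leave any files open
      } catch (Exception ex) {
        haveError = true; // log and keep going in real code
      }
    }
    resources.clear();
    if (haveError) {
      throw new IOException("Encountered errors while closing (see logs)");
    }
  }
}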
@Test
public void testCloseDelegates() throws IOException {
  mutator.close();
  verify(mockRecordUpdater).close(false);
}
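// testCloseDelegates() checks pure delegation with Mockito: close() on the
// wrapper must forward to RecordUpdater.close(false). A self-contained sketch
// of the same pattern, with a hypothetical Door/DoorCloser pair standing in
// for the Hive mutator and RecordUpdater:
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class DelegationTest {
  interface Door {
    void close(boolean abort);
  }

  static class DoorCloser {
    private final Door door;
    DoorCloser(Door door) { this.door = door; }
    void close() { door.close(false); } // delegate, never aborting
  }

  @Test
  public void closeDelegatesToDoor() {
    Door mockDoor = mock(Door.class);
    new DoorCloser(mockDoor).close();
    verify(mockDoor).close(false); // the exact boolean argument is verified too
  }
}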
public void closeWriters(boolean abort) throws HiveException {
  for (int idx = 0; idx < outWriters.length; idx++) {
    if (outWriters[idx] != null) {
      try {
        outWriters[idx].close(abort);
        updateProgress();
      } catch (IOException e) {
        throw new HiveException(e);
      }
    }
  }
  try {
    for (int i = 0; i < updaters.length; i++) {
      if (updaters[i] != null) {
        updaters[i].close(abort);
      }
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
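// In closeWriters(boolean abort) the flag is passed straight through to each
// writer: close(false) flushes and commits the output, close(true) discards
// partial output after a failure. A caller typically derives the flag from
// whether the write loop succeeded. A schematic sketch; AbortableWriter is
// hypothetical, mirroring the close(boolean) contract above:
import java.io.IOException;

interface AbortableWriter {
  void write(String row) throws IOException;
  // abort == true: discard partial output; abort == false: flush and commit
  void close(boolean abort) throws IOException;
}

final class WriterDriver {
  static void writeAll(AbortableWriter writer, Iterable<String> rows) throws IOException {
    boolean success = false;
    try {
      for (String row : rows) {
        writer.write(row);
      }
      success = true;
    } finally {
      writer.close(!success); // commit on success, abort on any failure
    }
  }
}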
assertTrue(outDump.contains("Compression size: 2048"));
System.setOut(origOut);
updater.close(false);
ru.close(false);
ru.insert(options.getMinimumWriteId(), new MyRow(i, 2 * i));
ru.close(false); // this deletes the side file
ru.insert(0, new MyRow(values[i]));
ru.close(false);
    .inspector(inspector).bucket(BUCKET).writingBase(true)
    .maximumWriteId(100).finalDestination(root);
of.getRecordUpdater(root, options).close(false);
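// The chained calls above come from Hive's AcidOutputFormat.Options, a fluent
// builder where each setter returns the options object itself. A much smaller
// hypothetical Options sketch showing the same pattern:
public final class Options {
  private int bucket;
  private boolean writingBase;
  private long maximumWriteId;

  public Options bucket(int bucket) { this.bucket = bucket; return this; }
  public Options writingBase(boolean base) { this.writingBase = base; return this; }
  public Options maximumWriteId(long id) { this.maximumWriteId = id; return this; }

  public static void main(String[] args) {
    Options options = new Options().bucket(0).writingBase(true).maximumWriteId(100);
    System.out.println("bucket=" + options.bucket + " writingBase=" + options.writingBase);
  }
}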
updater.insert(10000002, new BigRow(data, 1, 0, bucket));
updater.insert(10000002, new BigRow(data, 2, 0, bucket));
updater.close(false);
updater.close(false);
updater.close(false);
reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs));
updater.delete(100, new MyRow("", 60, 40, bucket));
assertEquals(-1L, updater.getStats().getRowCount());
updater.close(false);
Path bucketPath = AcidUtils.createFilename(root, options);
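// The tests above share one shape: insert/delete rows tagged with a write id,
// commit with close(false), then read the bucket file back (via
// OrcFile.createReader) and assert on what was persisted. A standalone
// write -> commit -> read-back sketch using plain java.nio so it runs without
// Hive or ORC on the classpath:
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public final class WriteReadBack {
  public static void main(String[] args) throws IOException {
    Path bucketPath = Files.createTempFile("bucket", ".txt");
    // "insert" two rows; finishing the write plays the role of close(false)
    Files.write(bucketPath, List.of("row-1", "row-2"));
    // read back and verify, as the ORC tests do after updater.close(false)
    List<String> rows = Files.readAllLines(bucketPath);
    if (rows.size() != 2) {
      throw new AssertionError("expected 2 rows, got " + rows.size());
    }
    System.out.println("read back " + rows.size() + " rows");
  }
}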