// Read side of the key/value result cache. First excerpt: refill the read buffer
// from the spill file, one ObjectPair slot per row.
for (int i = 0; i < IN_MEMORY_NUM_ROWS; i++) {
  ObjectPair<HiveKey, BytesWritable> pair = readBuffer[i];
  pair.setFirst(readHiveKey(input));
  pair.setSecond(readValue(input));
}

// Second excerpt: a single buffered row is returned straight from the write buffer.
// The slot is cleared so the key/value objects can be collected while the pair
// object itself stays in the buffer for reuse.
ObjectPair<HiveKey, BytesWritable> pair = writeBuffer[0];
Tuple2<HiveKey, BytesWritable> row =
    new Tuple2<HiveKey, BytesWritable>(pair.getFirst(), pair.getSecond());
pair.setFirst(null);
pair.setSecond(null);
writeCursor = 0;
return row;

// Third excerpt: hand out the next row from the read buffer, clear its slot, and
// mark the read buffer as drained once the cursor passes the last buffered row.
ObjectPair<HiveKey, BytesWritable> pair = readBuffer[readCursor];
Tuple2<HiveKey, BytesWritable> row =
    new Tuple2<HiveKey, BytesWritable>(pair.getFirst(), pair.getSecond());
pair.setFirst(null);
pair.setSecond(null);
if (++readCursor >= rowsInReadBuffer) {
  readBufferUsed = false;
}
public synchronized void add(HiveKey key, BytesWritable value) {
  if (writeCursor >= IN_MEMORY_NUM_ROWS) { // Write buffer is full
    if (!readBufferUsed) { // Read buffer isn't used, switch buffer
      switchBufferAndResetCursor();
    } else {
      // Need to spill from write buffer to disk
      try {
        if (output == null) {
          setupOutput();
        }
        for (int i = 0; i < IN_MEMORY_NUM_ROWS; i++) {
          ObjectPair<HiveKey, BytesWritable> pair = writeBuffer[i];
          writeHiveKey(output, pair.getFirst());
          writeValue(output, pair.getSecond());
          pair.setFirst(null);
          pair.setSecond(null);
        }
        writeCursor = 0;
      } catch (Exception e) {
        clear(); // Clean up the cache
        throw new RuntimeException("Failed to spill rows to disk", e);
      }
    }
  }
  ObjectPair<HiveKey, BytesWritable> pair = writeBuffer[writeCursor++];
  pair.setFirst(key);
  pair.setSecond(value);
}
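The same pair-recycling idea in isolation: a minimal sketch, not Hive's actual classes. MutablePair and RecyclingBuffer are hypothetical stand-ins, and the spill here just appends to an in-memory list; the point is that the pair holders are allocated once and only their contents are swapped or nulled.

import java.util.ArrayList;
import java.util.List;

// Hypothetical minimal mutable pair, mirroring the setFirst/setSecond/getFirst/getSecond API.
final class MutablePair<F, S> {
  private F first;
  private S second;
  F getFirst() { return first; }
  S getSecond() { return second; }
  void setFirst(F f) { first = f; }
  void setSecond(S s) { second = s; }
}

// Hypothetical fixed-size buffer that reuses its pair objects instead of allocating per row.
final class RecyclingBuffer<K, V> {
  private final MutablePair<K, V>[] slots;
  private final List<String> spillLog = new ArrayList<String>(); // stand-in for a real spill file
  private int cursor = 0;

  @SuppressWarnings("unchecked")
  RecyclingBuffer(int capacity) {
    slots = new MutablePair[capacity];
    for (int i = 0; i < capacity; i++) {
      slots[i] = new MutablePair<K, V>(); // allocate the pair holders once, up front
    }
  }

  void add(K key, V value) {
    if (cursor >= slots.length) {
      // "Spill": write out every slot, then null the references so the buffered
      // keys/values become collectible while the pair objects are reused.
      for (MutablePair<K, V> pair : slots) {
        spillLog.add(pair.getFirst() + "=" + pair.getSecond());
        pair.setFirst(null);
        pair.setSecond(null);
      }
      cursor = 0;
    }
    MutablePair<K, V> pair = slots[cursor++];
    pair.setFirst(key);
    pair.setSecond(value);
  }
}

As in the Hive snippet, only the references inside each slot are dropped after a spill; the pair holders live for the lifetime of the buffer, avoiding per-row allocation on the hot path.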
// Deep-copy the value into the pair's second slot before buffering: the reader reuses
// its Writable instance on the next row, so only a copy is safe to keep around.
tem.setSecond(ReflectionUtils.copy(job, value, tem.getSecond()));
buffer.add(tem);
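For context, a minimal sketch of why the copy matters, assuming Hadoop's org.apache.hadoop.util.ReflectionUtils.copy(conf, src, dst), which round-trips a Writable through serialization into the destination object. The demo class and variable names are illustrative, not taken from Hive.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ReflectionUtils;

// Hypothetical demo; only ReflectionUtils.copy and Text are real Hadoop APIs here.
public class CopyBeforeBufferDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Text reused = new Text();    // a reader-style instance that gets overwritten per row
    Text buffered = new Text();  // the destination owned by the buffered pair

    reused.set("row-1");
    buffered = ReflectionUtils.copy(conf, reused, buffered); // deep copy via serialization

    reused.set("row-2");              // the reader moves on and reuses its instance
    System.out.println(buffered);     // still prints "row-1": the buffered copy is independent
  }
}

Buffering the reused instance directly would leave every buffered pair pointing at whatever the reader saw last, which is why the copy happens before buffer.add.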
private boolean next(Integer current) throws IOException, HiveException {
  if (keyFields == null) {
    byte tag = tagForAlias(alias);
    // joinKeys/joinKeysOI are initialized after making the merge queue, so set them up lazily at runtime
    keyFields = joinKeys[tag];
    keyFieldOIs = joinKeysObjectInspectors[tag];
  }
  InspectableObject nextRow = segments[current].getNextRow();
  while (nextRow != null) {
    sinkOp.reset();
    if (keys[current] == null) {
      keys[current] = new ObjectPair<List<Object>, InspectableObject>();
    }
    // Pass the row through the operator tree. It is guaranteed that no more than one row
    // can be produced from an input row.
    forwardOp.process(nextRow.o, 0);
    nextRow = sinkOp.getResult();
    // It is possible that the row got absorbed in the operator tree.
    if (nextRow.o != null) {
      // TODO: this should be changed to be evaluated lazily, especially for the single segment case
      keys[current].setFirst(JoinUtil.computeKeys(nextRow.o, keyFields, keyFieldOIs));
      keys[current].setSecond(nextRow);
      return true;
    }
    nextRow = segments[current].getNextRow();
  }
  keys[current] = null;
  return false;
}
private boolean topLevelConjunctCheck(ASTNode searchCond, ObjectPair<Boolean, Integer> subqInfo) {
  if (searchCond.getType() == HiveParser.KW_OR) {
    subqInfo.setFirst(Boolean.TRUE);
    if (subqInfo.getSecond() > 1) {
      return false;
    }
  }
  if (searchCond.getType() == HiveParser.TOK_SUBQUERY_EXPR) {
    subqInfo.setSecond(subqInfo.getSecond() + 1);
    if (subqInfo.getSecond() > 1 && subqInfo.getFirst()) {
      return false;
    }
    return true;
  }
  for (int i = 0; i < searchCond.getChildCount(); i++) {
    boolean validSubQuery = topLevelConjunctCheck((ASTNode) searchCond.getChild(i), subqInfo);
    if (!validSubQuery) {
      return false;
    }
  }
  return true;
}
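Here the ObjectPair acts as a mutable accumulator threaded through the recursion: the first slot records whether an OR has been seen, the second counts subquery expressions. A self-contained toy sketch of that pattern follows; Node and Pair are illustrative stand-ins, and the (false, 0) seed values are an assumption about how the check would be driven, not Hive's actual call site.

import java.util.Arrays;
import java.util.List;

public class ConjunctCheckSketch {
  // Hypothetical minimal mutable pair used as a recursion accumulator.
  static final class Pair<F, S> {
    F first; S second;
    Pair(F first, S second) { this.first = first; this.second = second; }
  }

  // Hypothetical toy AST node: a type tag plus children.
  static final class Node {
    final String type; final List<Node> children;
    Node(String type, Node... children) { this.type = type; this.children = Arrays.asList(children); }
  }

  // Reject predicates where more than one subquery expression appears under an OR.
  static boolean topLevelConjunctCheck(Node cond, Pair<Boolean, Integer> info) {
    if (cond.type.equals("OR")) {
      info.first = Boolean.TRUE;
      if (info.second > 1) return false;
    }
    if (cond.type.equals("SUBQUERY")) {
      info.second = info.second + 1;
      return !(info.second > 1 && info.first);
    }
    for (Node child : cond.children) {
      if (!topLevelConjunctCheck(child, info)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    Node ok = new Node("AND", new Node("SUBQUERY"), new Node("COL"));
    Node bad = new Node("OR", new Node("SUBQUERY"), new Node("SUBQUERY"));
    System.out.println(topLevelConjunctCheck(ok, new Pair<Boolean, Integer>(false, 0)));   // true
    System.out.println(topLevelConjunctCheck(bad, new Pair<Boolean, Integer>(false, 0)));  // false
  }
}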