// Deprecated accessors kept for backward compatibility: they simply delegate to
// coder() and decoder(). NOTE(review): the trailing '@Deprecated' on the first
// line annotates the getDecoder() declaration that follows it.
@Deprecated public PrefixCoder getCoder() { return coder(); } @Deprecated
public Decoder getDecoder() { return decoder(); } }
// NOTE(review): the final '}' above closes the enclosing class, whose header is not visible in this chunk.
public HuTuckerCodec( final int[] frequency ) { this( intArray2LongArray( frequency ) ); }
return new Object[] { new HuTuckerCodec( packedFrequency ).coder().codeWords(), char2symbol };
markRec(node[minLeft], 0); root = rebuildTree(node[minLeft]); decoder = new TreeDecoder(root, size); coder = new CodeWordCoder(decoder.buildCodes());
codec = new HuTuckerCodec( packedFrequency ); prefixCoder = codec.coder(); decoder = codec.decoder();
/** We scan recursively the tree, making a copy that uses lightweight nodes. */ private TreeDecoder.Node rebuildTree( final LevelNode n ) { if ( n == null ) return null; if ( n.symbol != -1 ) return new TreeDecoder.LeafNode( n.symbol ); TreeDecoder.Node newNode = new TreeDecoder.Node(); newNode.left = rebuildTree( (LevelNode) n.left ); newNode.right = rebuildTree( (LevelNode) n.right ); return newNode; }
private static Object[] getCoder(final Iterable<? extends CharSequence> iterable, boolean prefixFree) { // First of all, we gather frequencies for all Unicode characters long[] frequency = new long[Character.MAX_VALUE + 1]; int maxWordLength = 0; CharSequence s; int n = 0; for(Iterator<? extends CharSequence> i = iterable.iterator(); i.hasNext();) { s = i.next(); maxWordLength = Math.max(s.length(), maxWordLength); for(int j = s.length(); j-- != 0;) frequency[s.charAt(j)]++; n++; } // Then, we compute the number of actually used characters. We count from the start the stop character. int count = prefixFree ? 1 : 0; for(int i = frequency.length; i-- != 0;) if (frequency[i] != 0) count++; /* Now we remap used characters in f, building at the same time the map from characters to symbols (except for the stop character). */ long[] packedFrequency = new long[count]; final Char2IntMap char2symbol = new Char2IntOpenHashMap(count); for(int i = frequency.length, k = count; i-- != 0;) { if (frequency[i] != 0) { packedFrequency[--k] = frequency[i]; char2symbol.put((char)i, k); } } if (prefixFree) packedFrequency[0] = n; // The stop character appears once in each string. // We now build the coder used to code the strings return new Object[] { new HuTuckerCodec(packedFrequency).coder().codeWords(), char2symbol }; }
markRec( node[ minLeft ], 0 ); root = rebuildTree( node[ minLeft ] ); decoder = new TreeDecoder( root, size ); coder = new CodeWordCoder( decoder.buildCodes() );
codec = new HuTuckerCodec(packedFrequency); prefixCoder = codec.coder(); decoder = codec.decoder();
/** We scan recursively the tree, making a copy that uses lightweight nodes. */ private TreeDecoder.Node rebuildTree( final LevelNode n ) { if ( n == null ) return null; if ( n.symbol != -1 ) return new TreeDecoder.LeafNode( n.symbol ); TreeDecoder.Node newNode = new TreeDecoder.Node(); newNode.left = rebuildTree( (LevelNode) n.left ); newNode.right = rebuildTree( (LevelNode) n.right ); return newNode; }
return new Object[] { new HuTuckerCodec( packedFrequency ).coder().codeWords(), char2symbol };
markRec( node[ minLeft ], 0 ); root = rebuildTree( node[ minLeft ] ); decoder = new TreeDecoder( root, size ); coder = new CodeWordCoder( decoder.buildCodes() );
codec = new HuTuckerCodec( packedFrequency ); prefixCoder = codec.coder(); decoder = codec.decoder();
/** We scan recursively the tree, making a copy that uses lightweight nodes. */ private TreeDecoder.Node rebuildTree(final LevelNode n) { if (n == null) return null; if (n.symbol != -1) return new TreeDecoder.LeafNode(n.symbol); TreeDecoder.Node newNode = new TreeDecoder.Node(); newNode.left = rebuildTree((LevelNode) n.left); newNode.right = rebuildTree((LevelNode) n.right); return newNode; }
// Deprecated alias delegating to coder(). NOTE(review): the trailing '@Deprecated'
// below annotates the int[] constructor that follows it.
@Deprecated public PrefixCoder getCoder() { return coder(); } @Deprecated
// Delegating constructor: widens the int frequencies to longs (intArray2LongArray)
// and forwards to the long[] constructor.
public HuTuckerCodec( final int[] frequency ) { this( intArray2LongArray( frequency ) ); }