// NOTE(review): fragment cut from a Snowball-generated stemmer method —
// the enclosing method and the switch header for the "case" labels are
// not visible here. Matches among table a_0 (13 entries); visible cases
// replace the matched region with "a"/"e"/"i"/"o".
among_var = find_among(a_0, 13); if (among_var == 0) slice_from("a"); break; case 2: slice_from("a"); break; case 3: slice_from("e"); break; case 4: slice_from("e"); break; case 5: slice_from("i"); break; case 6: slice_from("i"); break; case 7: slice_from("o"); break;
// NOTE(review): fragment cut mid-switch — enclosing method not visible.
// Backward match against among table a_3 (283 entries); visible cases
// delete the matched suffix only when region checks r_R1 / r_R2 hold.
among_var = find_among_b(a_3, 283); if (among_var == 0) if (!r_R1()) slice_del(); break; case 2: if (!r_R2()) slice_del(); break;
// NOTE(review): truncated fragment — the branch bodies of these grouping
// tests are not visible. Alternating vowel/non-vowel tests against
// grouping g_v over char range 97..252 (typical Snowball RV/R1 marking).
if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252))) if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252)))
private boolean r_attached_pronoun() { int among_var; // (, line 75 // [, line 76 ket = cursor; // substring, line 76 among_var = find_among_b(a_1, 39); if (among_var == 0) { return false; } // ], line 76 bra = cursor; switch(among_var) { case 0: return false; case 1: // (, line 86 // call R1, line 86 if (!r_R1()) { return false; } // delete, line 86 slice_del(); break; } return true; }
// NOTE(review): truncated fragment of the stemmer's main driver — the
// do/while closers and cursor save/restore bookkeeping are not visible.
// Chains the stemming phases: mark_regions, attached_pronoun,
// standard_suffix, verb_suffix, residual_suffix, cleaning; the labeled
// blocks make each phase optional (failure falls through).
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_attached_pronoun()) lab4: do { if (!r_standard_suffix()) cursor = limit - v_4; if (!r_verb_suffix()) lab5: do { if (!r_residual_suffix()) lab6: do { if (!r_cleaning())
/**
 * Builds the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link ElisionFilter}, {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link SetKeywordMarkerFilter} if a stem exclusion set is
 *         provided, and {@link SnowballFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  TokenStream stream = new ElisionFilter(source, DEFAULT_ARTICLES);
  stream = new LowerCaseFilter(stream);
  stream = new StopFilter(stream, stopwords);
  if (!stemExclusionSet.isEmpty()) {
    stream = new SetKeywordMarkerFilter(stream, stemExclusionSet);
  }
  stream = new SnowballFilter(stream, new CatalanStemmer());
  return new TokenStreamComponents(source, stream);
}
// NOTE(review): duplicate of the truncated driver fragment above-style
// snippet — do/while closers and cursor bookkeeping are not visible.
// Chains the stemming phases; each labeled block makes a phase optional.
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_attached_pronoun()) lab4: do { if (!r_standard_suffix()) cursor = limit - v_4; if (!r_verb_suffix()) lab5: do { if (!r_residual_suffix()) lab6: do { if (!r_cleaning())
private boolean r_attached_pronoun() { int among_var; // (, line 75 // [, line 76 ket = cursor; // substring, line 76 among_var = find_among_b(a_1, 39); if (among_var == 0) { return false; } // ], line 76 bra = cursor; switch(among_var) { case 0: return false; case 1: // (, line 86 // call R1, line 86 if (!r_R1()) { return false; } // delete, line 86 slice_del(); break; } return true; }
// Anonymous-class method: wraps the incoming token stream with a
// Snowball filter driven by the Catalan stemmer. The trailing "};"
// closes the enclosing anonymous class, whose declaration is not
// visible in this snippet.
@Override public TokenStream apply(final TokenStream input) { return new SnowballFilter(input, new CatalanStemmer()); } };
// NOTE(review): fragment cut mid-switch — enclosing method not visible.
// Backward match against among table a_4 (22 entries); visible cases,
// guarded by r_R1, delete the suffix or rewrite it to "ic".
among_var = find_among_b(a_4, 22); if (among_var == 0) if (!r_R1()) slice_del(); break; case 2: if (!r_R1()) slice_from("ic"); break;
// NOTE(review): truncated fragment of the stemmer's main driver — the
// do/while closers and cursor save/restore bookkeeping are not visible.
// Chains the stemming phases; each labeled block makes a phase optional.
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_attached_pronoun()) lab4: do { if (!r_standard_suffix()) cursor = limit - v_4; if (!r_verb_suffix()) lab5: do { if (!r_residual_suffix()) lab6: do { if (!r_cleaning())
private boolean r_attached_pronoun() { int among_var; // (, line 75 // [, line 76 ket = cursor; // substring, line 76 among_var = find_among_b(a_1, 39); if (among_var == 0) { return false; } // ], line 76 bra = cursor; switch(among_var) { case 0: return false; case 1: // (, line 86 // call R1, line 86 if (!r_R1()) { return false; } // delete, line 86 slice_del(); break; } return true; }
// Builds a filter chain over the given tokenizer, honoring the
// caseInsensitive / useStopWords / useStem configuration flags; the
// trailing "}" closes the enclosing class (declaration not visible).
// NOTE(review): stop words come from DanishAnalyzer.getDefaultStopSet()
// while the stemmer is CatalanStemmer — confirm this mix is intended and
// not a copy-paste slip.
@Override public TokenStream getTokenStream(Tokenizer tokenizer, CharArraySet stemExclusionSet) { TokenStream stream = new StandardFilter(matchVersion, tokenizer); if (caseInsensitive) stream = new LowerCaseFilter(matchVersion, stream); if (useStopWords) { stream = new ElisionFilter(stream, DEFAULT_ARTICLES); stream = new StopFilter(matchVersion, stream, DanishAnalyzer.getDefaultStopSet()); } if (useStem) { if (!stemExclusionSet.isEmpty()) stream = new SetKeywordMarkerFilter(stream, stemExclusionSet); stream = new SnowballFilter(stream, new CatalanStemmer()); } return stream; } }
// NOTE(review): truncated fragment — the branch bodies of these grouping
// tests are not visible. Alternating vowel/non-vowel tests against
// grouping g_v over char range 97..252 (typical Snowball RV/R1 marking).
if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252))) if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252)))
// NOTE(review): fragment cut from a Snowball-generated stemmer method —
// the enclosing method and the switch header for the "case" labels are
// not visible here. Matches among table a_0 (13 entries); visible cases
// replace the matched region with "a"/"e"/"i"/"o".
among_var = find_among(a_0, 13); if (among_var == 0) slice_from("a"); break; case 2: slice_from("a"); break; case 3: slice_from("e"); break; case 4: slice_from("e"); break; case 5: slice_from("i"); break; case 6: slice_from("i"); break; case 7: slice_from("o"); break;
// NOTE(review): fragment cut mid-switch — enclosing method not visible.
// Backward match against among table a_2 (200 entries); visible cases,
// guarded by r_R1 / r_R2, delete the suffix or rewrite it to "log"/"ic".
among_var = find_among_b(a_2, 200); if (among_var == 0) if (!r_R1()) slice_del(); break; case 2: if (!r_R2()) slice_del(); break; case 3: if (!r_R2()) slice_from("log"); break; case 4: if (!r_R2()) slice_from("ic"); break; case 5: if (!r_R1())
/**
 * Builds the
 * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link KeywordMarkerFilter} if a stem exclusion set is provided,
 *         and {@link SnowballFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
  final Tokenizer source = new StandardTokenizer(matchVersion, reader);
  TokenStream stream = new StandardFilter(matchVersion, source);
  // Version-gated: elision filtering is applied only for LUCENE_36 and later.
  if (matchVersion.onOrAfter(Version.LUCENE_36)) {
    stream = new ElisionFilter(matchVersion, stream, DEFAULT_ARTICLES);
  }
  stream = new LowerCaseFilter(matchVersion, stream);
  stream = new StopFilter(matchVersion, stream, stopwords);
  if (!stemExclusionSet.isEmpty()) {
    stream = new KeywordMarkerFilter(stream, stemExclusionSet);
  }
  stream = new SnowballFilter(stream, new CatalanStemmer());
  return new TokenStreamComponents(source, stream);
}
}
// NOTE(review): truncated fragment — the branch bodies of these grouping
// tests are not visible. Alternating vowel/non-vowel tests against
// grouping g_v over char range 97..252 (typical Snowball RV/R1 marking).
if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252))) if (!(in_grouping(g_v, 97, 252))) if (!(out_grouping(g_v, 97, 252)))
// NOTE(review): fragment cut from a Snowball-generated stemmer method —
// the enclosing method and the switch header for the "case" labels are
// not visible here. Matches among table a_0 (13 entries); visible cases
// replace the matched region with "a"/"e"/"i"/"o".
among_var = find_among(a_0, 13); if (among_var == 0) slice_from("a"); break; case 2: slice_from("a"); break; case 3: slice_from("e"); break; case 4: slice_from("e"); break; case 5: slice_from("i"); break; case 6: slice_from("i"); break; case 7: slice_from("o"); break;
// NOTE(review): fragment cut mid-switch — enclosing method not visible.
// Backward match against among table a_3 (283 entries); visible cases
// delete the matched suffix only when region checks r_R1 / r_R2 hold.
among_var = find_among_b(a_3, 283); if (among_var == 0) if (!r_R1()) slice_del(); break; case 2: if (!r_R2()) slice_del(); break;