/**
 * Creates a {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * which tokenizes all the text in the provided {@link Reader}.
 *
 * @return A {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link SetKeywordMarkerFilter} if a stem exclusion set is provided,
 *         and {@link SnowballFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  // Base tokenizer; the remaining filters wrap it in order.
  final Tokenizer tokenizer = new StandardTokenizer();
  // Lower-case first, then remove stopwords.
  TokenStream stream = new StopFilter(new LowerCaseFilter(tokenizer), stopwords);
  // Mark stem-exclusion terms as keywords so they pass through unstemmed.
  if (!stemExclusionSet.isEmpty()) {
    stream = new SetKeywordMarkerFilter(stream, stemExclusionSet);
  }
  // Snowball Swedish stemming is applied last.
  stream = new SnowballFilter(stream, new SwedishStemmer());
  return new TokenStreamComponents(tokenizer, stream);
}
// NOTE(review): fragment of Snowball-generated stemmer code; the enclosing method
// and switch are outside this view. The code matches the longest suffix in table
// a_0 (37 entries) backwards, deletes the slice when no entry matches, and — in
// the visible "case 2" arm — deletes only when the preceding character is NOT in
// the s-ending grouping (range 98-121). Appears collapsed from a guard plus a
// switch over among_var — TODO confirm against the full generated source.
among_var = find_among_b(a_0, 37); if (among_var == 0) slice_del(); break; case 2: if (!(in_grouping_b(g_s_ending, 98, 121))) slice_del(); break;
// NOTE(review): fragment — two consecutive guards over the vowel grouping g_v
// (character range 97-246), bodies not visible. Likely part of the Snowball
// mark_regions vowel/non-vowel scan — TODO confirm in the full source.
if (!(in_grouping(g_v, 97, 246))) if (!(out_grouping(g_v, 97, 246)))
// NOTE(review): fragment — opening labeled do-blocks chaining r_mark_regions,
// r_main_suffix, r_consonant_pair and r_other_suffix; the matching closing
// braces are outside this view, so the control flow cannot be verified here.
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_main_suffix()) lab2: do { if (!r_consonant_pair()) lab3: do { if (!r_other_suffix())
// NOTE(review): fragment — deletes the current slice when no suffix from table
// a_1 (7 entries) matches backwards, restores limit_backward from v_2 and
// returns success; the enclosing routine is outside this view — TODO confirm.
if (find_among_b(a_1, 7) == 0) slice_del(); limit_backward = v_2; return true;
// NOTE(review): fragment — matches suffix table a_2 (5 entries) backwards,
// deletes the slice when no entry matches, and in the visible switch arms
// rewrites the matched suffix to "l\u00F6s" (case 2) or "full" (case 3).
// Appears collapsed from a guard plus a switch over among_var — TODO confirm.
among_var = find_among_b(a_2, 5); if (among_var == 0) slice_del(); break; case 2: slice_from("l\u00F6s"); break; case 3: slice_from("full"); break;
// NOTE(review): fragment — opening labeled do-blocks chaining r_mark_regions,
// r_main_suffix, r_consonant_pair and r_other_suffix; the matching closing
// braces are outside this view, so the control flow cannot be verified here.
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_main_suffix()) lab2: do { if (!r_consonant_pair()) lab3: do { if (!r_other_suffix())
// NOTE(review): fragment — matches suffix table a_2 (5 entries) backwards,
// deletes the slice when no entry matches, and in the visible switch arms
// rewrites the matched suffix to "l\u00F6s" (case 2) or "full" (case 3).
// Appears collapsed from a guard plus a switch over among_var — TODO confirm.
among_var = find_among_b(a_2, 5); if (among_var == 0) slice_del(); break; case 2: slice_from("l\u00F6s"); break; case 3: slice_from("full"); break;
// NOTE(review): fragment — deletes the current slice when no suffix from table
// a_1 (7 entries) matches backwards, restores limit_backward from v_2 and
// returns success; the enclosing routine is outside this view — TODO confirm.
if (find_among_b(a_1, 7) == 0) slice_del(); limit_backward = v_2; return true;
/** Creates a stemmer backed by the Snowball {@link SwedishStemmer}. */
public SwedishSnowballStemmer() { super(new SwedishStemmer()); }

/** @return the language handled by this stemmer, always {@code Language.SWEDISH}. */
public Language getLanguage() { return Language.SWEDISH; }
// NOTE(review): fragment — opening labeled do-blocks chaining r_mark_regions,
// r_main_suffix, r_consonant_pair and r_other_suffix; the matching closing
// braces are outside this view, so the control flow cannot be verified here.
lab0: do { if (!r_mark_regions()) lab1: do { if (!r_main_suffix()) lab2: do { if (!r_consonant_pair()) lab3: do { if (!r_other_suffix())
// NOTE(review): fragment of Snowball-generated stemmer code; the enclosing method
// and switch are outside this view. The code matches the longest suffix in table
// a_0 (37 entries) backwards, deletes the slice when no entry matches, and — in
// the visible "case 2" arm — deletes only when the preceding character is NOT in
// the s-ending grouping (range 98-121). Appears collapsed from a guard plus a
// switch over among_var — TODO confirm against the full generated source.
among_var = find_among_b(a_0, 37); if (among_var == 0) slice_del(); break; case 2: if (!(in_grouping_b(g_s_ending, 98, 121))) slice_del(); break;
// NOTE(review): fragment — matches suffix table a_2 (5 entries) backwards,
// deletes the slice when no entry matches, and in the visible switch arms
// rewrites the matched suffix to "l\u00F6s" (case 2) or "full" (case 3).
// Appears collapsed from a guard plus a switch over among_var — TODO confirm.
among_var = find_among_b(a_2, 5); if (among_var == 0) slice_del(); break; case 2: slice_from("l\u00F6s"); break; case 3: slice_from("full"); break;
// NOTE(review): fragment — deletes the current slice when no suffix from table
// a_1 (7 entries) matches backwards, restores limit_backward from v_2 and
// returns success; the enclosing routine is outside this view — TODO confirm.
if (find_among_b(a_1, 7) == 0) slice_del(); limit_backward = v_2; return true;
// NOTE(review): fragment — two consecutive guards over the vowel grouping g_v
// (character range 97-246), bodies not visible. Likely part of the Snowball
// mark_regions vowel/non-vowel scan — TODO confirm in the full source.
if (!(in_grouping(g_v, 97, 246))) if (!(out_grouping(g_v, 97, 246)))
// Wraps the incoming stream with the Snowball Swedish stemmer.
// NOTE(review): the trailing "};" closes an anonymous class whose declaration
// is outside this view.
@Override public TokenStream apply(final TokenStream input) { return new SnowballFilter(input, new SwedishStemmer()); } };
// NOTE(review): fragment of Snowball-generated stemmer code; the enclosing method
// and switch are outside this view. The code matches the longest suffix in table
// a_0 (37 entries) backwards, deletes the slice when no entry matches, and — in
// the visible "case 2" arm — deletes only when the preceding character is NOT in
// the s-ending grouping (range 98-121). Appears collapsed from a guard plus a
// switch over among_var — TODO confirm against the full generated source.
among_var = find_among_b(a_0, 37); if (among_var == 0) slice_del(); break; case 2: if (!(in_grouping_b(g_s_ending, 98, 121))) slice_del(); break;
// NOTE(review): fragment — two consecutive guards over the vowel grouping g_v
// (character range 97-246), bodies not visible. Likely part of the Snowball
// mark_regions vowel/non-vowel scan — TODO confirm in the full source.
if (!(in_grouping(g_v, 97, 246))) if (!(out_grouping(g_v, 97, 246)))
/**
 * Wraps the given token stream with the Swedish analysis chain:
 * lower-casing followed by the Snowball Swedish stemmer.
 *
 * @param result the upstream token stream to wrap
 * @return the wrapped stream ending in a {@link SnowballFilter}
 */
// Fixed modifier order: "public static" per Java convention (was "static public").
public static TokenStream swedish(TokenStream result) {
  // Lower-case before stemming, presumably so the stemmer sees normalized tokens.
  result = new LowerCaseFilter(result);
  result = new SnowballFilter(result, new SwedishStemmer());
  return result;
}
/**
 * Assembles the configurable Swedish token-filter chain for the given tokenizer:
 * standard filtering, then optional lower-casing, optional stop-word removal
 * (using {@code SwedishAnalyzer}'s default stop set), and optional stemming with
 * keyword marking for any stem-exclusion terms.
 *
 * @param tokenizer        the source tokenizer to wrap
 * @param stemExclusionSet terms to mark as keywords before stemming; may be empty
 * @return the fully wrapped token stream
 */
@Override
public TokenStream getTokenStream(Tokenizer tokenizer, CharArraySet stemExclusionSet) {
  TokenStream result = new StandardFilter(matchVersion, tokenizer);
  if (caseInsensitive) {
    result = new LowerCaseFilter(matchVersion, result);
  }
  if (useStopWords) {
    result = new StopFilter(matchVersion, result, SwedishAnalyzer.getDefaultStopSet());
  }
  if (useStem) {
    // Mark exclusion terms as keywords before the stemmer is applied.
    if (!stemExclusionSet.isEmpty()) {
      result = new SetKeywordMarkerFilter(result, stemExclusionSet);
    }
    result = new SnowballFilter(result, new SwedishStemmer());
  }
  return result;
}
} // closes the enclosing class (its declaration is outside this view)