analyzer.py

# encoding=utf-8
from __future__ import unicode_literals

from whoosh.analysis import RegexAnalyzer, LowercaseFilter, StopFilter, StemFilter
from whoosh.analysis import Tokenizer, Token
from whoosh.lang.porter import stem

import jieba
import re

# English stop words plus a few very common Chinese particles ("的", "了", "和").
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
                        'you', 'your', '的', '了', '和'))

# Matches runs of CJK Unified Ideographs.
accepted_chars = re.compile(r"[\u4E00-\u9FD5]+")


class ChineseTokenizer(Tokenizer):

    def __call__(self, text, **kargs):
        # Segment with jieba in search mode, which also emits the shorter
        # sub-words contained in long compounds.
        words = jieba.tokenize(text, mode="search")
        token = Token()
        for (w, start_pos, stop_pos) in words:
            # Skip single non-Chinese characters (punctuation, stray letters).
            if not accepted_chars.match(w) and len(w) <= 1:
                continue
            token.original = token.text = w
            token.pos = start_pos
            token.startchar = start_pos
            token.endchar = stop_pos
            yield token


def ChineseAnalyzer(stoplist=STOP_WORDS, minsize=1, stemfn=stem, cachesize=50000):
    # Compose the tokenizer with Whoosh's standard filters: lowercasing,
    # stop-word removal, and Porter stemming (the stemmer only affects
    # English tokens).
    return (ChineseTokenizer() | LowercaseFilter() |
            StopFilter(stoplist=stoplist, minsize=minsize) |
            StemFilter(stemfn=stemfn, ignore=None, cachesize=cachesize))
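

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the module above): run a sample sentence
# through ChineseAnalyzer and print the surviving tokens. The sample text is
# an illustrative assumption; wiring the analyzer into a Whoosh Schema/index
# is left out here.
if __name__ == "__main__":
    analyzer = ChineseAnalyzer()
    for t in analyzer("我的好朋友是李明;我爱北京天安门;IBM和Microsoft"):
        # Each token has passed through lowercasing, stop-word filtering
        # and stemming, so particles such as "的" and "和" are dropped.
        print(t.text)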