compound.py

# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division

from whoosh import matching
from whoosh.compat import text_type, u
from whoosh.compat import xrange
from whoosh.query import qcore
from whoosh.util import make_binary_tree, make_weighted_tree


class CompoundQuery(qcore.Query):
    """Abstract base class for queries that combine or manipulate the results
    of multiple sub-queries.
    """

    def __init__(self, subqueries, boost=1.0):
        for subq in subqueries:
            if not isinstance(subq, qcore.Query):
                raise qcore.QueryError("%r is not a query" % subq)
        self.subqueries = subqueries
        self.boost = boost

    def __repr__(self):
        r = "%s(%r" % (self.__class__.__name__, self.subqueries)
        if hasattr(self, "boost") and self.boost != 1:
            r += ", boost=%s" % self.boost
        r += ")"
        return r

    def __unicode__(self):
        r = u("(")
        r += self.JOINT.join([text_type(s) for s in self.subqueries])
        r += u(")")
        return r

    __str__ = __unicode__

    def __eq__(self, other):
        return (other
                and self.__class__ is other.__class__
                and self.subqueries == other.subqueries
                and self.boost == other.boost)

    def __getitem__(self, i):
        return self.subqueries.__getitem__(i)

    def __len__(self):
        return len(self.subqueries)

    def __iter__(self):
        return iter(self.subqueries)

    def __hash__(self):
        h = hash(self.__class__.__name__) ^ hash(self.boost)
        for q in self.subqueries:
            h ^= hash(q)
        return h

    def is_leaf(self):
        return False

    def children(self):
        return iter(self.subqueries)

    def apply(self, fn):
        return self.__class__([fn(q) for q in self.subqueries],
                              boost=self.boost)

    def field(self):
        if self.subqueries:
            f = self.subqueries[0].field()
            if all(q.field() == f for q in self.subqueries[1:]):
                return f

    def estimate_size(self, ixreader):
        est = sum(q.estimate_size(ixreader) for q in self.subqueries)
        return min(est, ixreader.doc_count())

    def estimate_min_size(self, ixreader):
        from whoosh.query import Not

        subs = self.subqueries
        qs = [(q, q.estimate_min_size(ixreader)) for q in subs
              if not isinstance(q, Not)]
        pos = [minsize for q, minsize in qs if minsize > 0]
        if pos:
            neg = [q.estimate_size(ixreader) for q in subs
                   if isinstance(q, Not)]
            size = min(pos) - sum(neg)
            if size > 0:
                return size
        return 0

    def normalize(self):
        from whoosh.query import Every, TermRange, NumericRange

        # Normalize subqueries and merge nested instances of this class
        subqueries = []
        for s in self.subqueries:
            s = s.normalize()
            if isinstance(s, self.__class__):
                subqueries += [ss.with_boost(ss.boost * s.boost) for ss in s]
            else:
                subqueries.append(s)

        # If every subquery is Null, this query is Null
        if all(q is qcore.NullQuery for q in subqueries):
            return qcore.NullQuery

        # If there's an unfielded Every inside, then this query is Every
        if any((isinstance(q, Every) and q.fieldname is None)
               for q in subqueries):
            return Every()

        # Merge ranges and Everys
        everyfields = set()
        i = 0
        while i < len(subqueries):
            q = subqueries[i]
            f = q.field()

            if f in everyfields:
                subqueries.pop(i)
                continue

            if isinstance(q, (TermRange, NumericRange)):
                j = i + 1
                while j < len(subqueries):
                    if q.overlaps(subqueries[j]):
                        qq = subqueries.pop(j)
                        q = q.merge(qq, intersect=self.intersect_merge)
                    else:
                        j += 1

                q = subqueries[i] = q.normalize()

            if isinstance(q, Every):
                everyfields.add(q.fieldname)
            i += 1

        # Eliminate duplicate queries
        subqs = []
        seenqs = set()
        for s in subqueries:
            if not isinstance(s, Every) and s.field() in everyfields:
                continue
            if s in seenqs:
                continue
            seenqs.add(s)
            subqs.append(s)

        # Remove NullQuerys
        subqs = [q for q in subqs if q is not qcore.NullQuery]

        if not subqs:
            return qcore.NullQuery

        if len(subqs) == 1:
            sub = subqs[0]
            sub_boost = getattr(sub, "boost", 1.0)
            if not (self.boost == 1.0 and sub_boost == 1.0):
                sub = sub.with_boost(sub_boost * self.boost)
            return sub

        return self.__class__(subqs, boost=self.boost)

    def simplify(self, ixreader):
        subs = self.subqueries
        if subs:
            q = self.__class__([subq.simplify(ixreader) for subq in subs],
                               boost=self.boost).normalize()
        else:
            q = qcore.NullQuery
        return q

    def matcher(self, searcher, context=None):
        # This method does a little sanity checking and then passes the info
        # down to the _matcher() method which subclasses must implement
        subs = self.subqueries
        if not subs:
            return matching.NullMatcher()

        if len(subs) == 1:
            m = subs[0].matcher(searcher, context)
        else:
            m = self._matcher(subs, searcher, context)
        return m

    def _matcher(self, subs, searcher, context):
        # Subclasses must implement this method
        raise NotImplementedError

    def _tree_matcher(self, subs, mcls, searcher, context, q_weight_fn,
                      **kwargs):
        # q_weight_fn is a function which is called on each query and returns
        # a "weight" value which is used to build a Huffman-like matcher tree.
        # If q_weight_fn is None, an order-preserving binary tree is used
        # instead.

        # Create a matcher from the list of subqueries
        subms = [q.matcher(searcher, context) for q in subs]

        if len(subms) == 1:
            m = subms[0]
        elif q_weight_fn is None:
            m = make_binary_tree(mcls, subms, **kwargs)
        else:
            w_subms = [(q_weight_fn(q), m) for q, m in zip(subs, subms)]
            m = make_weighted_tree(mcls, w_subms, **kwargs)

        # If this query had a boost, add a wrapping matcher to apply the boost
        if self.boost != 1.0:
            m = matching.WrappingMatcher(m, self.boost)

        return m


class And(CompoundQuery):
    """Matches documents that match ALL of the subqueries.

    >>> And([Term("content", u"render"),
    ...      Term("content", u"shade"),
    ...      Not(Term("content", u"texture"))])
    >>> # You can also do this
    >>> Term("content", u"render") & Term("content", u"shade")
    """

    # This is used by the superclass's __unicode__ method.
    JOINT = " AND "
    intersect_merge = True

    def requires(self):
        s = set()
        for q in self.subqueries:
            s |= q.requires()
        return s

    def estimate_size(self, ixreader):
        return min(q.estimate_size(ixreader) for q in self.subqueries)

    def _matcher(self, subs, searcher, context):
        r = searcher.reader()
        q_weight_fn = lambda q: 0 - q.estimate_size(r)
        return self._tree_matcher(subs, matching.IntersectionMatcher, searcher,
                                  context, q_weight_fn)


class Or(CompoundQuery):
    """Matches documents that match ANY of the subqueries.

    >>> Or([Term("content", u"render"),
    ...     And([Term("content", u"shade"), Term("content", u"texture")]),
    ...     Not(Term("content", u"network"))])
    >>> # You can also do this
    >>> Term("content", u"render") | Term("content", u"shade")
    """

    # This is used by the superclass's __unicode__ method.
    JOINT = " OR "
    intersect_merge = False

    TOO_MANY_CLAUSES = 1024

    # For debugging: set the matcher_type attribute to control matcher
    # selection
    AUTO_MATCHER = 0  # Use automatic heuristics to choose matcher
    DEFAULT_MATCHER = 1  # Use a binary tree of UnionMatchers
    SPLIT_MATCHER = 2  # Use a different strategy for short and long queries
    ARRAY_MATCHER = 3  # Use a matcher that pre-loads docnums and scores
    matcher_type = AUTO_MATCHER
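
    # For example, to force the preloaded-array strategy while debugging,
    # one might set the attribute on the class before searching:
    #
    #   Or.matcher_type = Or.ARRAY_MATCHER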

    def __init__(self, subqueries, boost=1.0, minmatch=0, scale=None):
        """
        :param subqueries: a list of :class:`Query` objects to search for.
        :param boost: a boost factor to apply to the scores of all matching
            documents.
        :param minmatch: not yet implemented.
        :param scale: a scaling factor for a "coordination bonus". If this
            value is not None, it should be a floating point number greater
            than 0 and less than 1. The scores of the matching documents are
            boosted/penalized based on the number of query terms that matched
            in the document. This number scales the effect of the bonuses.
        """

        CompoundQuery.__init__(self, subqueries, boost=boost)
        self.minmatch = minmatch
        self.scale = scale
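
    # A usage sketch for the coordination bonus, assuming Term is imported
    # from whoosh.query: a scale of 0.5 gives a moderate boost to documents
    # that match more of the terms:
    #
    #   q = Or([Term("content", u"render"), Term("content", u"shade"),
    #           Term("content", u"texture")], scale=0.5)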

    def __unicode__(self):
        r = u("(")
        r += (self.JOINT).join([text_type(s) for s in self.subqueries])
        r += u(")")
        if self.minmatch:
            r += u(">%s") % self.minmatch
        return r

    __str__ = __unicode__

    def normalize(self):
        norm = CompoundQuery.normalize(self)
        if norm.__class__ is self.__class__:
            norm.minmatch = self.minmatch
            norm.scale = self.scale
        return norm

    def requires(self):
        if len(self.subqueries) == 1:
            return self.subqueries[0].requires()
        else:
            return set()

    def _matcher(self, subs, searcher, context):
        needs_current = context.needs_current if context else True
        weighting = context.weighting if context else None

        matcher_type = self.matcher_type
        if matcher_type == self.AUTO_MATCHER:
            dc = searcher.doc_count_all()
            if (len(subs) < self.TOO_MANY_CLAUSES
                and (needs_current
                     or self.scale
                     or len(subs) == 2
                     or dc > 5000)):
                # If the parent matcher needs the current match, or there are
                # only two sub-matchers, use the standard binary tree of
                # Unions
                matcher_type = self.DEFAULT_MATCHER
            else:
                # For small indexes, or too many clauses, just preload all
                # matches
                matcher_type = self.ARRAY_MATCHER

        if matcher_type == self.DEFAULT_MATCHER:
            # Implementation of Or that creates a binary tree of Union
            # matchers
            cls = DefaultOr
        elif matcher_type == self.SPLIT_MATCHER:
            # Hybrid of pre-loading small queries and a binary tree of union
            # matchers for big queries
            cls = SplitOr
        elif matcher_type == self.ARRAY_MATCHER:
            # Implementation that pre-loads docnums and scores into an array
            cls = PreloadedOr
        else:
            raise ValueError("Unknown matcher_type %r" % self.matcher_type)

        return cls(subs, boost=self.boost, minmatch=self.minmatch,
                   scale=self.scale).matcher(searcher, context)


class DefaultOr(Or):
    JOINT = " dOR "

    def _matcher(self, subs, searcher, context):
        reader = searcher.reader()
        q_weight_fn = lambda q: q.estimate_size(reader)
        m = self._tree_matcher(subs, matching.UnionMatcher, searcher, context,
                               q_weight_fn)

        # If a scaling factor was given, wrap the matcher in a CoordMatcher to
        # alter scores based on term coordination
        if self.scale and any(m.term_matchers()):
            m = matching.CoordMatcher(m, scale=self.scale)

        return m


class SplitOr(Or):
    JOINT = " sOr "
    SPLIT_DOC_LIMIT = 8000

    def matcher(self, searcher, context=None):
        from whoosh import collectors

        # Get the subqueries
        subs = self.subqueries
        if not subs:
            return matching.NullMatcher()
        elif len(subs) == 1:
            return subs[0].matcher(searcher, context)

        # Sort the subqueries into "small" and "big" queries based on their
        # estimated size. This works best for term queries.
        reader = searcher.reader()
        smallqs = []
        bigqs = []
        for q in subs:
            size = q.estimate_size(reader)
            if size <= self.SPLIT_DOC_LIMIT:
                smallqs.append(q)
            else:
                bigqs.append(q)

        # Build a pre-scored matcher for the small queries
        minscore = 0
        smallmatcher = None
        if smallqs:
            smallmatcher = DefaultOr(smallqs).matcher(searcher, context)
            smallmatcher = matching.ArrayMatcher(smallmatcher, context.limit)
            minscore = smallmatcher.limit_quality()

        if bigqs:
            # Get a matcher for the big queries
            m = DefaultOr(bigqs).matcher(searcher, context)
            # Add the prescored matcher for the small queries
            if smallmatcher:
                m = matching.UnionMatcher(m, smallmatcher)
                # Set the minimum score based on the prescored matcher
                m.set_min_quality(minscore)
        elif smallmatcher:
            # If there are no big queries, just return the prescored matcher
            m = smallmatcher
        else:
            m = matching.NullMatcher()

        return m


class PreloadedOr(Or):
    JOINT = " pOR "

    def _matcher(self, subs, searcher, context):
        if context:
            scored = context.weighting is not None
        else:
            scored = True

        ms = [sub.matcher(searcher, context) for sub in subs]
        doccount = searcher.doc_count_all()
        am = matching.ArrayUnionMatcher(ms, doccount, boost=self.boost,
                                        scored=scored)
        return am


class DisjunctionMax(CompoundQuery):
    """Matches all documents that match any of the subqueries, but scores
    each document using the maximum score from the subqueries.
    """

    def __init__(self, subqueries, boost=1.0, tiebreak=0.0):
        CompoundQuery.__init__(self, subqueries, boost=boost)
        self.tiebreak = tiebreak

    def __unicode__(self):
        r = u("DisMax(")
        r += " ".join(sorted(text_type(s) for s in self.subqueries))
        r += u(")")
        if self.tiebreak:
            r += u("~") + text_type(self.tiebreak)
        return r

    __str__ = __unicode__

    def normalize(self):
        norm = CompoundQuery.normalize(self)
        if norm.__class__ is self.__class__:
            norm.tiebreak = self.tiebreak
        return norm

    def requires(self):
        if len(self.subqueries) == 1:
            return self.subqueries[0].requires()
        else:
            return set()

    def _matcher(self, subs, searcher, context):
        r = searcher.reader()
        q_weight_fn = lambda q: q.estimate_size(r)
        return self._tree_matcher(subs, matching.DisjunctionMaxMatcher,
                                  searcher, context, q_weight_fn,
                                  tiebreak=self.tiebreak)


# Boolean queries

class BinaryQuery(CompoundQuery):
    """Base class for binary queries (queries which are composed of two
    sub-queries). Subclasses should set the ``matcherclass`` attribute or
    override ``matcher()``, and may also need to override ``normalize()``,
    ``estimate_size()``, and/or ``estimate_min_size()``.
    """

    boost = 1.0

    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.subqueries = (a, b)

    def __eq__(self, other):
        return (other and self.__class__ is other.__class__
                and self.a == other.a and self.b == other.b)

    def __hash__(self):
        return (hash(self.__class__.__name__) ^ hash(self.a) ^ hash(self.b))

    def needs_spans(self):
        return self.a.needs_spans() or self.b.needs_spans()

    def apply(self, fn):
        return self.__class__(fn(self.a), fn(self.b))

    def field(self):
        f = self.a.field()
        if self.b.field() == f:
            return f

    def with_boost(self, boost):
        return self.__class__(self.a.with_boost(boost),
                              self.b.with_boost(boost))

    def normalize(self):
        a = self.a.normalize()
        b = self.b.normalize()
        if a is qcore.NullQuery and b is qcore.NullQuery:
            return qcore.NullQuery
        elif a is qcore.NullQuery:
            return b
        elif b is qcore.NullQuery:
            return a

        return self.__class__(a, b)

    def matcher(self, searcher, context=None):
        return self.matcherclass(self.a.matcher(searcher, context),
                                 self.b.matcher(searcher, context))


class AndNot(BinaryQuery):
    """Binary boolean query of the form 'a ANDNOT b', where documents that
    match b are removed from the matches for a.
    """

    JOINT = " ANDNOT "

    def with_boost(self, boost):
        return self.__class__(self.a.with_boost(boost), self.b)

    def normalize(self):
        a = self.a.normalize()
        b = self.b.normalize()

        if a is qcore.NullQuery:
            return qcore.NullQuery
        elif b is qcore.NullQuery:
            return a

        return self.__class__(a, b)

    def requires(self):
        return self.a.requires()

    def matcher(self, searcher, context=None):
        scoredm = self.a.matcher(searcher, context)
        notm = self.b.matcher(searcher, searcher.boolean_context())
        return matching.AndNotMatcher(scoredm, notm)


class Otherwise(BinaryQuery):
    """A binary query that only matches the second clause if the first clause
    doesn't match any documents.
    """

    JOINT = " OTHERWISE "

    def matcher(self, searcher, context=None):
        m = self.a.matcher(searcher, context)
        if not m.is_active():
            m = self.b.matcher(searcher, context)
        return m


class Require(BinaryQuery):
    """Binary query that returns results from the first query that also
    appear in the second query, but only uses the scores from the first
    query. This lets you filter results without affecting scores.
    """

    JOINT = " REQUIRE "
    matcherclass = matching.RequireMatcher

    def requires(self):
        return self.a.requires() | self.b.requires()

    def estimate_size(self, ixreader):
        return self.b.estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self.b.estimate_min_size(ixreader)

    def with_boost(self, boost):
        return self.__class__(self.a.with_boost(boost), self.b)

    def normalize(self):
        a = self.a.normalize()
        b = self.b.normalize()
        if a is qcore.NullQuery or b is qcore.NullQuery:
            return qcore.NullQuery
        return self.__class__(a, b)

    def docs(self, searcher):
        return And(self.subqueries).docs(searcher)

    def matcher(self, searcher, context=None):
        scoredm = self.a.matcher(searcher, context)
        requiredm = self.b.matcher(searcher, searcher.boolean_context())
        # Documents must also match self.b, but only self.a contributes to
        # the score
        return matching.RequireMatcher(scoredm, requiredm)


class AndMaybe(BinaryQuery):
    """Binary query that takes results from the first query. If and only if
    the same document also appears in the results from the second query, the
    score from the second query will be added to the score from the first
    query.
    """

    JOINT = " ANDMAYBE "
    matcherclass = matching.AndMaybeMatcher

    def normalize(self):
        a = self.a.normalize()
        b = self.b.normalize()
        if a is qcore.NullQuery:
            return qcore.NullQuery
        if b is qcore.NullQuery:
            return a
        return self.__class__(a, b)

    def requires(self):
        return self.a.requires()

    def estimate_min_size(self, ixreader):
        return self.subqueries[0].estimate_min_size(ixreader)

    def docs(self, searcher):
        return self.subqueries[0].docs(searcher)


def BooleanQuery(required, should, prohibited):
    return AndNot(AndMaybe(And(required), Or(should)),
                  Or(prohibited)).normalize()
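

# A usage sketch for BooleanQuery, assuming Term is imported from
# whoosh.query: documents must contain "render", may score a bonus for
# "shade", and must not contain "texture":
#
#   q = BooleanQuery(required=[Term("content", u"render")],
#                    should=[Term("content", u"shade")],
#                    prohibited=[Term("content", u"texture")])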