  1. """
  2. This module contains a tokenizer for Excel formulae.
  3. The tokenizer is based on the Javascript tokenizer found at
  4. http://ewbi.blogs.com/develops/2004/12/excel_formula_p.html written by Eric
  5. Bachtal
  6. """
  7. import re


class TokenizerError(Exception):
    """Base class for all Tokenizer errors."""


class Tokenizer(object):
    """
    A tokenizer for Excel worksheet formulae.

    Converts a unicode string representing an Excel formula (in A1 notation)
    into a sequence of `Token` objects.

    `formula`: The unicode string to tokenize

    Tokenizer defines a method `._parse()` to parse the formula into tokens,
    which can then be accessed through the `.items` attribute.
    """
    SN_RE = re.compile(r"^[1-9](\.[0-9]+)?[Ee]$")  # Scientific notation
    WSPACE_RE = re.compile(r"[ \n]+")
    STRING_REGEXES = {
        # Inside a string, all characters are treated as literals, except for
        # the quote character used to start the string. That character, when
        # doubled, is treated as a single character in the string. If an
        # unmatched quote appears, the string is terminated.
        '"': re.compile('"(?:[^"]*"")*[^"]*"(?!")'),
        "'": re.compile("'(?:[^']*'')*[^']*'(?!')"),
    }
    ERROR_CODES = ("#NULL!", "#DIV/0!", "#VALUE!", "#REF!", "#NAME?",
                   "#NUM!", "#N/A", "#GETTING_DATA")
    TOKEN_ENDERS = ',;}) +-*/^&=><%'  # Each of these characters marks the
                                      # end of an operand token

    def __init__(self, formula):
        self.formula = formula
        self.items = []
        self.token_stack = []  # Used to keep track of arrays, functions, and
                               # parentheses
        self.offset = 0  # How many chars have we read
        self.token = []  # Used to build up token values char by char
        self._parse()

    def _parse(self):
        """Populate self.items with the tokens from the formula."""
        if self.offset:
            return  # Already parsed!
        if not self.formula:
            return
        elif self.formula[0] == '=':
            self.offset += 1
        else:
            self.items.append(Token(self.formula, Token.LITERAL))
            return
        consumers = (
            ('"\'', self._parse_string),
            ('[', self._parse_brackets),
            ('#', self._parse_error),
            (' ', self._parse_whitespace),
            ('\n', self._parse_whitespace),
            ('+-*/^&=><%', self._parse_operator),
            ('{(', self._parse_opener),
            (')}', self._parse_closer),
            (';,', self._parse_separator),
        )
        dispatcher = {}  # maps chars to the specific parsing function
        for chars, consumer in consumers:
            dispatcher.update(dict.fromkeys(chars, consumer))
        while self.offset < len(self.formula):
            if self.check_scientific_notation():  # May consume one character
                continue
            curr_char = self.formula[self.offset]
            if curr_char in self.TOKEN_ENDERS:
                self.save_token()
            if curr_char in dispatcher:
                self.offset += dispatcher[curr_char]()
            else:
                # TODO: this can probably be sped up using a regex to get to
                # the next interesting character
                self.token.append(curr_char)
                self.offset += 1
        self.save_token()
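
    # Note that '(' and '{' are deliberately absent from TOKEN_ENDERS, so a
    # pending function name (e.g. "SUM") is still in self.token when
    # _parse_opener runs and can be merged into a single "SUM(" token.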

    def _parse_string(self):
        """
        Parse a "-delimited string or '-delimited link.

        The offset must be pointing to either a single quote ("'") or double
        quote ('"') character. The strings are parsed according to Excel
        rules where to escape the delimiter you just double it up. E.g.,
        "abc""def" in Excel is parsed as 'abc"def' in Python.

        Returns the number of characters matched. (Does not update
        self.offset)
        """
        self.assert_empty_token(can_follow=':')
        delim = self.formula[self.offset]
        assert delim in ('"', "'")
        regex = self.STRING_REGEXES[delim]
        match = regex.match(self.formula[self.offset:])
        if match is None:
            subtype = "string" if delim == '"' else "link"
            raise TokenizerError(
                "Reached end of formula while parsing %s in %s" %
                (subtype, self.formula))
        match = match.group(0)
        if delim == '"':
            self.items.append(Token.make_operand(match))
        else:
            self.token.append(match)
        return len(match)
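
    # Example: for '=LEN("abc""def")' the operand token keeps the doubled
    # quote verbatim ('"abc""def"'); unescaping is left to consumers of the
    # token stream.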

    def _parse_brackets(self):
        """
        Consume all the text between square brackets [].

        Returns the number of characters matched. (Does not update
        self.offset)
        """
        assert self.formula[self.offset] == '['
        lefts = [(t.start(), 1) for t in
                 re.finditer(r"\[", self.formula[self.offset:])]
        rights = [(t.start(), -1) for t in
                  re.finditer(r"\]", self.formula[self.offset:])]
        open_count = 0
        for idx, open_close in sorted(lefts + rights):
            open_count += open_close
            if open_count == 0:
                outer_right = idx + 1
                self.token.append(
                    self.formula[self.offset:self.offset + outer_right])
                return outer_right
        raise TokenizerError("Encountered unmatched '[' in %s" % self.formula)
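
    # Example: brackets may nest, as in the structured reference
    # "=Table1[[#Headers],[Col]]"; the whole "[[#Headers],[Col]]" run,
    # including the inner comma, is consumed into a single operand token.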

    def _parse_error(self):
        """
        Consume the text following a '#' as an error.

        Looks for a match in self.ERROR_CODES and returns the number of
        characters matched. (Does not update self.offset)
        """
        self.assert_empty_token(can_follow='!')
        assert self.formula[self.offset] == '#'
        subformula = self.formula[self.offset:]
        for err in self.ERROR_CODES:
            if subformula.startswith(err):
                self.items.append(Token.make_operand(
                    ''.join(self.token) + err))
                del self.token[:]
                return len(err)
        raise TokenizerError(
            "Invalid error code at position %d in '%s'" %
            (self.offset, self.formula))
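
    # Example: "=#REF!+1" yields the ERROR operand "#REF!". A sheet-
    # qualified error such as "=Sheet1!#REF!" is also accepted ('#' may
    # follow '!') and the combined text "Sheet1!#REF!" becomes one operand.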

    def _parse_whitespace(self):
        """
        Consume a run of consecutive spaces and/or newlines.

        Returns the number of whitespace characters found. (Does not update
        self.offset)
        """
        assert self.formula[self.offset] in (' ', '\n')
        self.items.append(Token(self.formula[self.offset], Token.WSPACE))
        return self.WSPACE_RE.match(self.formula[self.offset:]).end()

    def _parse_operator(self):
        """
        Consume the characters constituting an operator.

        Returns the number of characters consumed. (Does not update
        self.offset)
        """
        if self.formula[self.offset:self.offset + 2] in ('>=', '<=', '<>'):
            self.items.append(Token(
                self.formula[self.offset:self.offset + 2],
                Token.OP_IN
            ))
            return 2
        curr_char = self.formula[self.offset]  # guaranteed to be 1 char
        assert curr_char in '%*/^&=><+-'
        if curr_char == '%':
            token = Token('%', Token.OP_POST)
        elif curr_char in "*/^&=><":
            token = Token(curr_char, Token.OP_IN)
        # From here on, curr_char is guaranteed to be in '+-'
        elif not self.items:
            token = Token(curr_char, Token.OP_PRE)
        else:
            prev = next((i for i in reversed(self.items)
                         if i.type != Token.WSPACE), None)
            is_infix = prev and (
                prev.subtype == Token.CLOSE
                or prev.type == Token.OP_POST
                or prev.type == Token.OPERAND
            )
            if is_infix:
                token = Token(curr_char, Token.OP_IN)
            else:
                token = Token(curr_char, Token.OP_PRE)
        self.items.append(token)
        return 1
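
    # Example: in "=-A1-3" the first '-' is OPERATOR-PREFIX (no preceding
    # items), while the second is OPERATOR-INFIX because the nearest
    # non-whitespace item before it is the operand A1.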

    def _parse_opener(self):
        """
        Consume a ( or { character.

        Returns the number of characters consumed. (Does not update
        self.offset)
        """
        assert self.formula[self.offset] in ('(', '{')
        if self.formula[self.offset] == '{':
            self.assert_empty_token()
            token = Token.make_subexp("{")
        elif self.token:
            token_value = "".join(self.token) + '('
            del self.token[:]
            token = Token.make_subexp(token_value)
        else:
            token = Token.make_subexp("(")
        self.items.append(token)
        self.token_stack.append(token)
        return 1

    def _parse_closer(self):
        """
        Consume a } or ) character.

        Returns the number of characters consumed. (Does not update
        self.offset)
        """
        assert self.formula[self.offset] in (')', '}')
        token = self.token_stack.pop().get_closer()
        if token.value != self.formula[self.offset]:
            raise TokenizerError(
                "Mismatched ( and { pair in '%s'" % self.formula)
        self.items.append(token)
        return 1

    def _parse_separator(self):
        """
        Consume a ; or , character.

        Returns the number of characters consumed. (Does not update
        self.offset)
        """
        curr_char = self.formula[self.offset]
        assert curr_char in (';', ',')
        if curr_char == ';':
            token = Token.make_separator(";")
        else:
            try:
                top_type = self.token_stack[-1].type
            except IndexError:
                token = Token(",", Token.OP_IN)  # Range Union operator
            else:
                if top_type == Token.PAREN:
                    token = Token(",", Token.OP_IN)  # Range Union operator
                else:
                    token = Token.make_separator(",")
        self.items.append(token)
        return 1
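
    # Example: in "=SUM((A1,B1))" the token on top of token_stack at the
    # inner ',' is a bare paren, so the comma is the range-union operator
    # (OPERATOR-INFIX) rather than an argument separator.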

    def check_scientific_notation(self):
        """
        Consume a + or - character if it is part of a number in scientific
        notation.

        Returns True if the character was consumed and self.offset was
        updated, False otherwise.
        """
        curr_char = self.formula[self.offset]
        if (curr_char in '+-'
                and len(self.token) >= 1
                and self.SN_RE.match("".join(self.token))):
            self.token.append(curr_char)
            self.offset += 1
            return True
        return False
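
    # Example: in "=1.5E+10" the '+' follows the partial token "1.5E",
    # which matches SN_RE, so it is folded into the number instead of being
    # treated as an operator.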

    def assert_empty_token(self, can_follow=()):
        """
        Ensure that there's no token currently being parsed, or that any
        token being parsed ends with a character in can_follow.

        If there are unconsumed token contents, it means we hit an unexpected
        token transition. In this case, we raise a TokenizerError.
        """
        if self.token and self.token[-1] not in can_follow:
            raise TokenizerError(
                "Unexpected character at position %d in '%s'" %
                (self.offset, self.formula))

    def save_token(self):
        """If there's a token being parsed, add it to the item list."""
        if self.token:
            self.items.append(Token.make_operand("".join(self.token)))
            del self.token[:]

    def render(self):
        """Convert the parsed tokens back to a string."""
        if not self.items:
            return ""
        elif self.items[0].type == Token.LITERAL:
            return self.items[0].value
        return "=" + "".join(token.value for token in self.items)
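
    # Example: Tokenizer("=1 + 2").render() == "=1 + 2". Runs of whitespace
    # collapse to a single character, since _parse_whitespace stores only
    # the first character of each run.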


class Token(object):
    """
    A token in an Excel formula.

    Tokens have three attributes:

    * `value`: The string value parsed that led to this token
    * `type`: A string identifying the type of token
    * `subtype`: A string identifying subtype of the token (optional, and
      defaults to "")
    """
    __slots__ = ['value', 'type', 'subtype']

    LITERAL = "LITERAL"
    OPERAND = "OPERAND"
    FUNC = "FUNC"
    ARRAY = "ARRAY"
    PAREN = "PAREN"
    SEP = "SEP"
    OP_PRE = "OPERATOR-PREFIX"
    OP_IN = "OPERATOR-INFIX"
    OP_POST = "OPERATOR-POSTFIX"
    WSPACE = "WHITE-SPACE"

    def __init__(self, value, type_, subtype=""):
        self.value = value
        self.type = type_
        self.subtype = subtype

    # Literal operands:
    #
    # Literal operands are always of type 'OPERAND' and can be of subtype
    # 'TEXT' (for text strings), 'NUMBER' (for all numeric types), 'LOGICAL'
    # (for TRUE and FALSE), 'ERROR' (for literal error values), or 'RANGE'
    # (for all range references).
    TEXT = 'TEXT'
    NUMBER = 'NUMBER'
    LOGICAL = 'LOGICAL'
    ERROR = 'ERROR'
    RANGE = 'RANGE'

    def __repr__(self):
        return u"{0} {1} {2}".format(self.type, self.subtype, self.value)

    @classmethod
    def make_operand(cls, value):
        """Create an operand token."""
        if value.startswith('"'):
            subtype = cls.TEXT
        elif value.startswith('#'):
            subtype = cls.ERROR
        elif value in ('TRUE', 'FALSE'):
            subtype = cls.LOGICAL
        else:
            try:
                float(value)
                subtype = cls.NUMBER
            except ValueError:
                subtype = cls.RANGE
        return cls(value, cls.OPERAND, subtype)
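
    # Example subtype inference: '"abc"' -> TEXT, '#NUM!' -> ERROR,
    # 'TRUE' -> LOGICAL, '1.5E+10' -> NUMBER (it parses as a float), and
    # anything else, e.g. 'Sheet1!A1', falls back to RANGE.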

    # Subexpressions
    #
    # There are three types of subexpression: functions, array literals, and
    # parentheticals. Subexpressions have 'OPEN' and 'CLOSE' tokens. 'OPEN'
    # is used when parsing the initial expression token (i.e., '(' or '{')
    # and 'CLOSE' is used when parsing the closing expression token ('}' or
    # ')').
    OPEN = "OPEN"
    CLOSE = "CLOSE"

    @classmethod
    def make_subexp(cls, value, func=False):
        """
        Create a subexpression token.

        `value`: The value of the token
        `func`: If True, force the token to be of type FUNC
        """
        assert value[-1] in ('{', '}', '(', ')')
        if func:
            assert re.match(r'.+\(|\)', value)
            type_ = Token.FUNC
        elif value in '{}':
            type_ = Token.ARRAY
        elif value in '()':
            type_ = Token.PAREN
        else:
            type_ = Token.FUNC
        subtype = cls.CLOSE if value in ')}' else cls.OPEN
        return cls(value, type_, subtype)

    def get_closer(self):
        """Return a closing token that matches this token's type."""
        assert self.type in (self.FUNC, self.ARRAY, self.PAREN)
        assert self.subtype == self.OPEN
        value = "}" if self.type == self.ARRAY else ")"
        return self.make_subexp(value, func=self.type == self.FUNC)

    # Separator tokens
    #
    # Argument separators always have type 'SEP' and can have one of two
    # subtypes: 'ARG', 'ROW'. 'ARG' is used for the ',' token, when used to
    # delimit either function arguments or array elements. 'ROW' is used for
    # the ';' token, which is always used to delimit rows in an array
    # literal.
    ARG = "ARG"
    ROW = "ROW"

    @classmethod
    def make_separator(cls, value):
        """Create a separator token."""
        assert value in (',', ';')
        subtype = cls.ARG if value == ',' else cls.ROW
        return cls(value, cls.SEP, subtype)
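

# Minimal usage sketch (illustrative; not part of the original module):
# tokenize a small formula, print each token, and check that render()
# reproduces the input.
if __name__ == "__main__":
    tok = Tokenizer('=SUM(A1:A2) + 3 %')
    for item in tok.items:
        print("%-16s %-7s %r" % (item.type, item.subtype, item.value))
    assert tok.render() == '=SUM(A1:A2) + 3 %'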